logs.txt
==> Audit Log <==
|---------|------------------------------------------------------|----------|-----------|---------|---------------------|---------------------|
| Command | Args | Profile | User | Version | Start Time | End Time |
|---------|------------------------------------------------------|----------|-----------|---------|---------------------|---------------------|
| pause | | minikube | hellotalk | v1.33.1 | 04 Jul 24 19:25 CST | |
| start | --driver docker | minikube | hellotalk | v1.33.1 | 04 Jul 24 19:26 CST | |
| start | --driver docker | minikube | hellotalk | v1.33.1 | 05 Jul 24 09:28 CST | |
| delete | | minikube | hellotalk | v1.33.1 | 05 Jul 24 09:30 CST | 05 Jul 24 09:31 CST |
| start | --driver docker | minikube | hellotalk | v1.33.1 | 05 Jul 24 09:31 CST | |
| start | --driver docker | minikube | hellotalk | v1.33.1 | 05 Jul 24 09:53 CST | |
| ssh | | minikube | hellotalk | v1.33.1 | 05 Jul 24 09:54 CST | 05 Jul 24 09:54 CST |
| start | --driver docker | minikube | hellotalk | v1.33.1 | 05 Jul 24 09:55 CST | |
| delete | | minikube | hellotalk | v1.33.1 | 05 Jul 24 09:56 CST | 05 Jul 24 09:56 CST |
| start | --image-mirror-country=cn --driver docker | minikube | hellotalk | v1.33.1 | 05 Jul 24 09:56 CST | |
| | --image-repository=registry.cn-hangzhou.aliyuncs.com | | | | | |
| delete | | minikube | hellotalk | v1.33.1 | 05 Jul 24 10:22 CST | 05 Jul 24 10:22 CST |
| start | --image-mirror-country=cn | minikube | hellotalk | v1.33.1 | 05 Jul 24 10:23 CST | 05 Jul 24 10:23 CST |
| | --driver docker | | | | | |
| ip | | minikube | hellotalk | v1.33.1 | 05 Jul 24 10:48 CST | 05 Jul 24 10:48 CST |
| ssh | | minikube | hellotalk | v1.33.1 | 05 Jul 24 10:52 CST | |
| ssh | | minikube | hellotalk | v1.33.1 | 05 Jul 24 11:01 CST | |
| ssh | | minikube | hellotalk | v1.33.1 | 05 Jul 24 14:16 CST | |
| start | --image-mirror-country=cn | minikube | hellotalk | v1.33.1 | 05 Jul 24 14:16 CST | 05 Jul 24 14:16 CST |
| | --driver docker | | | | | |
| start | --image-mirror-country=cn | minikube | hellotalk | v1.33.1 | 05 Jul 24 15:05 CST | 05 Jul 24 15:05 CST |
| | --driver docker | | | | | |
| ssh | | minikube | hellotalk | v1.33.1 | 05 Jul 24 15:56 CST | |
| ssh | | minikube | hellotalk | v1.33.1 | 05 Jul 24 17:08 CST | |
| ip | | minikube | hellotalk | v1.33.1 | 08 Jul 24 11:33 CST | 08 Jul 24 11:33 CST |
| ssh | | minikube | hellotalk | v1.33.1 | 08 Jul 24 11:55 CST | |
| ssh | | minikube | hellotalk | v1.33.1 | 08 Jul 24 11:59 CST | 08 Jul 24 14:08 CST |
| service | --all | minikube | hellotalk | v1.33.1 | 08 Jul 24 12:04 CST | 08 Jul 24 12:04 CST |
| service | -n prometheus | minikube | hellotalk | v1.33.1 | 08 Jul 24 12:05 CST | |
| service | --all prometheus | minikube | hellotalk | v1.33.1 | 08 Jul 24 12:05 CST | |
| service | prometheus --all | minikube | hellotalk | v1.33.1 | 08 Jul 24 12:05 CST | |
| service | --all | minikube | hellotalk | v1.33.1 | 08 Jul 24 12:05 CST | 08 Jul 24 12:06 CST |
| service | prometheus | minikube | hellotalk | v1.33.1 | 08 Jul 24 12:21 CST | |
| service | prometheus -n prometheus | minikube | hellotalk | v1.33.1 | 08 Jul 24 12:21 CST | |
| service | list | minikube | hellotalk | v1.33.1 | 08 Jul 24 12:21 CST | 08 Jul 24 12:22 CST |
| ssh | | minikube | hellotalk | v1.33.1 | 08 Jul 24 12:34 CST | 08 Jul 24 14:08 CST |
| service | list | minikube | hellotalk | v1.33.1 | 08 Jul 24 14:34 CST | 08 Jul 24 14:34 CST |
|---------|------------------------------------------------------|----------|-----------|---------|---------------------|---------------------|
==> Last Start <==
Log file created at: 2024/07/05 15:05:46
Running on machine: hellotalk
Binary: Built with gc go1.22.1 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I0705 15:05:46.912106 39530 out.go:291] Setting OutFile to fd 1 ...
I0705 15:05:46.912244 39530 out.go:343] isatty.IsTerminal(1) = true
I0705 15:05:46.912247 39530 out.go:304] Setting ErrFile to fd 2...
I0705 15:05:46.912250 39530 out.go:343] isatty.IsTerminal(2) = true
I0705 15:05:46.912422 39530 root.go:338] Updating PATH: /home/hellotalk/.minikube/bin
I0705 15:05:46.912721 39530 out.go:298] Setting JSON to false
I0705 15:05:46.913855 39530 start.go:129] hostinfo: {"hostname":"hellotalk","uptime":3107,"bootTime":1720160040,"procs":401,"os":"linux","platform":"Kylin","platformFamily":"debian","platformVersion":"V10","kernelVersion":"6.8.0-36-generic","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"host","hostId":"fe5c207a-b8d3-4039-ad19-89ea856d1eaf"}
I0705 15:05:46.913891 39530 start.go:139] virtualization: kvm host
I0705 15:05:46.919518 39530 out.go:177] 😄 minikube v1.33.1 on Kylin V10
I0705 15:05:46.930141 39530 notify.go:220] Checking for updates...
I0705 15:05:46.930546 39530 config.go:182] Loaded profile config "minikube": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.30.0
I0705 15:05:46.930982 39530 driver.go:392] Setting default libvirt URI to qemu:///system
I0705 15:05:46.943870 39530 docker.go:122] docker version: linux-27.0.3:Docker Engine - Community
I0705 15:05:46.943950 39530 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0705 15:05:46.971969 39530 info.go:266] docker info: {ID:52e42d06-3d35-4ae0-a5f4-ec96fa6ab5ad Containers:2 ContainersRunning:0 ContainersPaused:0 ContainersStopped:2 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:23 OomKillDisable:false NGoroutines:39 SystemTime:2024-07-05 15:05:46.966287306 +0800 CST LoggingDriver:json-file CgroupDriver:systemd NEventsListener:0 KernelVersion:6.8.0-36-generic OperatingSystem:Ubuntu 24.04 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:12 MemTotal:33495568384 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:hellotalk Labels:[] ExperimentalBuild:false ServerVersion:27.0.3 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:ae71819c4f5e67bb4d5ae76a6b735f29cc25774e Expected:ae71819c4f5e67bb4d5ae76a6b735f29cc25774e} RuncCommit:{ID:v1.1.13-0-g58aa920 Expected:v1.1.13-0-g58aa920} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.15.1] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.28.1]] Warnings:<nil>}}
I0705 15:05:46.972031 39530 docker.go:295] overlay module found
I0705 15:05:46.977410 39530 out.go:177] ✨ Using the docker driver based on existing profile
I0705 15:05:46.982750 39530 start.go:297] selected driver: docker
I0705 15:05:46.982754 39530 start.go:901] validating driver "docker" against &{Name:minikube KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:docker.io/kicbase/stable:v0.0.44@sha256:eb04641328b06c5c4a14f4348470e1046bbcf9c2cbc551486e343d3a49db557e Memory:7900 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.30.0 ClusterName:minikube Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository:registry.cn-hangzhou.aliyuncs.com/google_containers LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.58.2 Port:8443 KubernetesVersion:v1.30.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/hellotalk:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0705 15:05:46.982803 39530 start.go:912] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I0705 15:05:46.982875 39530 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0705 15:05:47.011983 39530 info.go:266] docker info: {ID:52e42d06-3d35-4ae0-a5f4-ec96fa6ab5ad Containers:2 ContainersRunning:0 ContainersPaused:0 ContainersStopped:2 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:23 OomKillDisable:false NGoroutines:39 SystemTime:2024-07-05 15:05:47.00658252 +0800 CST LoggingDriver:json-file CgroupDriver:systemd NEventsListener:0 KernelVersion:6.8.0-36-generic OperatingSystem:Ubuntu 24.04 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:12 MemTotal:33495568384 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:hellotalk Labels:[] ExperimentalBuild:false ServerVersion:27.0.3 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:ae71819c4f5e67bb4d5ae76a6b735f29cc25774e Expected:ae71819c4f5e67bb4d5ae76a6b735f29cc25774e} RuncCommit:{ID:v1.1.13-0-g58aa920 Expected:v1.1.13-0-g58aa920} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.15.1] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.28.1]] Warnings:<nil>}}
I0705 15:05:47.012686 39530 cni.go:84] Creating CNI manager for ""
I0705 15:05:47.012694 39530 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0705 15:05:47.012725 39530 start.go:340] cluster config:
{Name:minikube KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:docker.io/kicbase/stable:v0.0.44@sha256:eb04641328b06c5c4a14f4348470e1046bbcf9c2cbc551486e343d3a49db557e Memory:7900 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.30.0 ClusterName:minikube Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository:registry.cn-hangzhou.aliyuncs.com/google_containers LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.58.2 Port:8443 KubernetesVersion:v1.30.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/hellotalk:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0705 15:05:47.018234 39530 out.go:177] 👍 Starting "minikube" primary control-plane node in "minikube" cluster
I0705 15:05:47.023636 39530 cache.go:121] Beginning downloading kic base image for docker with docker
I0705 15:05:47.029044 39530 out.go:177] 🚜 Pulling base image v0.0.44 ...
I0705 15:05:47.034450 39530 image.go:79] Checking for docker.io/kicbase/stable:v0.0.44@sha256:eb04641328b06c5c4a14f4348470e1046bbcf9c2cbc551486e343d3a49db557e in local docker daemon
I0705 15:05:47.034495 39530 profile.go:143] Saving config to /home/hellotalk/.minikube/profiles/minikube/config.json ...
I0705 15:05:47.034650 39530 cache.go:107] acquiring lock: {Name:mk3486a61b8833261f53dde7c7c967e1a0ea0353 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0705 15:05:47.034654 39530 cache.go:107] acquiring lock: {Name:mke9c22e408c718e7d0813b8d7319e67d194f1c8 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0705 15:05:47.034670 39530 cache.go:107] acquiring lock: {Name:mk8e7b3ba072d98ed8517e92768f2c23314593e1 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0705 15:05:47.034699 39530 cache.go:115] /home/hellotalk/.minikube/cache/images/amd64/registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy_v1.30.0 exists
I0705 15:05:47.034694 39530 cache.go:107] acquiring lock: {Name:mk20615de9615d94c8b64589db299690aef1ff1a Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0705 15:05:47.034700 39530 cache.go:115] /home/hellotalk/.minikube/cache/images/amd64/registry.cn-hangzhou.aliyuncs.com/google_containers/storage-provisioner_v5 exists
I0705 15:05:47.034706 39530 cache.go:96] cache image "registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.30.0" -> "/home/hellotalk/.minikube/cache/images/amd64/registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy_v1.30.0" took 56.849µs
I0705 15:05:47.034707 39530 cache.go:96] cache image "registry.cn-hangzhou.aliyuncs.com/google_containers/storage-provisioner:v5" -> "/home/hellotalk/.minikube/cache/images/amd64/registry.cn-hangzhou.aliyuncs.com/google_containers/storage-provisioner_v5" took 63.94µs
I0705 15:05:47.034688 39530 cache.go:107] acquiring lock: {Name:mke45c653f7509c7bf1727b48f8eed09fe2e8a04 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0705 15:05:47.034711 39530 cache.go:80] save to tar file registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.30.0 -> /home/hellotalk/.minikube/cache/images/amd64/registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy_v1.30.0 succeeded
I0705 15:05:47.034712 39530 cache.go:80] save to tar file registry.cn-hangzhou.aliyuncs.com/google_containers/storage-provisioner:v5 -> /home/hellotalk/.minikube/cache/images/amd64/registry.cn-hangzhou.aliyuncs.com/google_containers/storage-provisioner_v5 succeeded
I0705 15:05:47.034722 39530 cache.go:115] /home/hellotalk/.minikube/cache/images/amd64/registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver_v1.30.0 exists
I0705 15:05:47.034727 39530 cache.go:96] cache image "registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.30.0" -> "/home/hellotalk/.minikube/cache/images/amd64/registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver_v1.30.0" took 34.206µs
I0705 15:05:47.034734 39530 cache.go:80] save to tar file registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.30.0 -> /home/hellotalk/.minikube/cache/images/amd64/registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver_v1.30.0 succeeded
I0705 15:05:47.034745 39530 cache.go:115] /home/hellotalk/.minikube/cache/images/amd64/registry.cn-hangzhou.aliyuncs.com/google_containers/pause_3.9 exists
I0705 15:05:47.034751 39530 cache.go:96] cache image "registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.9" -> "/home/hellotalk/.minikube/cache/images/amd64/registry.cn-hangzhou.aliyuncs.com/google_containers/pause_3.9" took 70.247µs
I0705 15:05:47.034758 39530 cache.go:80] save to tar file registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.9 -> /home/hellotalk/.minikube/cache/images/amd64/registry.cn-hangzhou.aliyuncs.com/google_containers/pause_3.9 succeeded
I0705 15:05:47.034751 39530 cache.go:115] /home/hellotalk/.minikube/cache/images/amd64/registry.cn-hangzhou.aliyuncs.com/google_containers/etcd_3.5.12-0 exists
I0705 15:05:47.034747 39530 cache.go:107] acquiring lock: {Name:mk6446cd5bdbfe589aabaa4f5e11f42337ed2e06 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0705 15:05:47.034766 39530 cache.go:96] cache image "registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.12-0" -> "/home/hellotalk/.minikube/cache/images/amd64/registry.cn-hangzhou.aliyuncs.com/google_containers/etcd_3.5.12-0" took 96.673µs
I0705 15:05:47.034781 39530 cache.go:80] save to tar file registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.12-0 -> /home/hellotalk/.minikube/cache/images/amd64/registry.cn-hangzhou.aliyuncs.com/google_containers/etcd_3.5.12-0 succeeded
I0705 15:05:47.034769 39530 cache.go:107] acquiring lock: {Name:mk84c8ae1d1eff4d92853edf7d7808f2f572c555 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0705 15:05:47.034764 39530 cache.go:107] acquiring lock: {Name:mkd981ff70f0bffcc2fbc7c2d5070efa2c5c631a Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0705 15:05:47.034838 39530 cache.go:115] /home/hellotalk/.minikube/cache/images/amd64/registry.cn-hangzhou.aliyuncs.com/google_containers/coredns_v1.11.1 exists
I0705 15:05:47.034844 39530 cache.go:115] /home/hellotalk/.minikube/cache/images/amd64/registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager_v1.30.0 exists
I0705 15:05:47.034850 39530 cache.go:96] cache image "registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.30.0" -> "/home/hellotalk/.minikube/cache/images/amd64/registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager_v1.30.0" took 114.897µs
I0705 15:05:47.034848 39530 cache.go:96] cache image "registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.11.1" -> "/home/hellotalk/.minikube/cache/images/amd64/registry.cn-hangzhou.aliyuncs.com/google_containers/coredns_v1.11.1" took 129.542µs
I0705 15:05:47.034854 39530 cache.go:80] save to tar file registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.30.0 -> /home/hellotalk/.minikube/cache/images/amd64/registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager_v1.30.0 succeeded
I0705 15:05:47.034858 39530 cache.go:80] save to tar file registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.11.1 -> /home/hellotalk/.minikube/cache/images/amd64/registry.cn-hangzhou.aliyuncs.com/google_containers/coredns_v1.11.1 succeeded
I0705 15:05:47.034857 39530 cache.go:115] /home/hellotalk/.minikube/cache/images/amd64/registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler_v1.30.0 exists
I0705 15:05:47.034869 39530 cache.go:96] cache image "registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.30.0" -> "/home/hellotalk/.minikube/cache/images/amd64/registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler_v1.30.0" took 126.516µs
I0705 15:05:47.034876 39530 cache.go:80] save to tar file registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.30.0 -> /home/hellotalk/.minikube/cache/images/amd64/registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler_v1.30.0 succeeded
I0705 15:05:47.034882 39530 cache.go:87] Successfully saved all images to host disk.
I0705 15:05:47.043885 39530 image.go:83] Found docker.io/kicbase/stable:v0.0.44@sha256:eb04641328b06c5c4a14f4348470e1046bbcf9c2cbc551486e343d3a49db557e in local docker daemon, skipping pull
I0705 15:05:47.043894 39530 cache.go:144] docker.io/kicbase/stable:v0.0.44@sha256:eb04641328b06c5c4a14f4348470e1046bbcf9c2cbc551486e343d3a49db557e exists in daemon, skipping load
I0705 15:05:47.043908 39530 cache.go:194] Successfully downloaded all kic artifacts
I0705 15:05:47.043927 39530 start.go:360] acquireMachinesLock for minikube: {Name:mk4b960829c03ddc3010fac6bdb17168cd8bb28c Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0705 15:05:47.043963 39530 start.go:364] duration metric: took 26.304µs to acquireMachinesLock for "minikube"
I0705 15:05:47.043971 39530 start.go:96] Skipping create...Using existing machine configuration
I0705 15:05:47.043974 39530 fix.go:54] fixHost starting:
I0705 15:05:47.044158 39530 cli_runner.go:164] Run: docker container inspect minikube --format={{.State.Status}}
I0705 15:05:47.053911 39530 fix.go:112] recreateIfNeeded on minikube: state=Stopped err=<nil>
W0705 15:05:47.053929 39530 fix.go:138] unexpected machine state, will restart: <nil>
I0705 15:05:47.059458 39530 out.go:177] 🔄 Restarting existing docker container for "minikube" ...
I0705 15:05:47.064918 39530 cli_runner.go:164] Run: docker start minikube
I0705 15:05:47.275357 39530 cli_runner.go:164] Run: docker container inspect minikube --format={{.State.Status}}
I0705 15:05:47.285108 39530 kic.go:430] container "minikube" state is running.
I0705 15:05:47.285427 39530 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" minikube
I0705 15:05:47.295681 39530 profile.go:143] Saving config to /home/hellotalk/.minikube/profiles/minikube/config.json ...
I0705 15:05:47.295832 39530 machine.go:94] provisionDockerMachine start ...
I0705 15:05:47.295881 39530 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" minikube
I0705 15:05:47.305704 39530 main.go:141] libmachine: Using SSH client type: native
I0705 15:05:47.305840 39530 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x82d6e0] 0x830440 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0705 15:05:47.305844 39530 main.go:141] libmachine: About to run SSH command:
hostname
I0705 15:05:47.306220 39530 main.go:141] libmachine: Error dialing TCP: ssh: handshake failed: read tcp 127.0.0.1:36024->127.0.0.1:32768: read: connection reset by peer
I0705 15:05:50.428765 39530 main.go:141] libmachine: SSH cmd err, output: <nil>: minikube
I0705 15:05:50.428783 39530 ubuntu.go:169] provisioning hostname "minikube"
I0705 15:05:50.428884 39530 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" minikube
I0705 15:05:50.441079 39530 main.go:141] libmachine: Using SSH client type: native
I0705 15:05:50.441220 39530 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x82d6e0] 0x830440 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0705 15:05:50.441225 39530 main.go:141] libmachine: About to run SSH command:
sudo hostname minikube && echo "minikube" | sudo tee /etc/hostname
I0705 15:05:50.602215 39530 main.go:141] libmachine: SSH cmd err, output: <nil>: minikube
I0705 15:05:50.602273 39530 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" minikube
I0705 15:05:50.612194 39530 main.go:141] libmachine: Using SSH client type: native
I0705 15:05:50.612313 39530 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x82d6e0] 0x830440 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0705 15:05:50.612321 39530 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\sminikube' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 minikube/g' /etc/hosts;
else
echo '127.0.1.1 minikube' | sudo tee -a /etc/hosts;
fi
fi
I0705 15:05:50.731957 39530 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0705 15:05:50.731985 39530 ubuntu.go:175] set auth options {CertDir:/home/hellotalk/.minikube CaCertPath:/home/hellotalk/.minikube/certs/ca.pem CaPrivateKeyPath:/home/hellotalk/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/hellotalk/.minikube/machines/server.pem ServerKeyPath:/home/hellotalk/.minikube/machines/server-key.pem ClientKeyPath:/home/hellotalk/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/hellotalk/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/hellotalk/.minikube}
I0705 15:05:50.732037 39530 ubuntu.go:177] setting up certificates
I0705 15:05:50.732055 39530 provision.go:84] configureAuth start
I0705 15:05:50.732270 39530 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" minikube
I0705 15:05:50.743679 39530 provision.go:143] copyHostCerts
I0705 15:05:50.743708 39530 exec_runner.go:144] found /home/hellotalk/.minikube/ca.pem, removing ...
I0705 15:05:50.743716 39530 exec_runner.go:203] rm: /home/hellotalk/.minikube/ca.pem
I0705 15:05:50.743760 39530 exec_runner.go:151] cp: /home/hellotalk/.minikube/certs/ca.pem --> /home/hellotalk/.minikube/ca.pem (1086 bytes)
I0705 15:05:50.743830 39530 exec_runner.go:144] found /home/hellotalk/.minikube/cert.pem, removing ...
I0705 15:05:50.743833 39530 exec_runner.go:203] rm: /home/hellotalk/.minikube/cert.pem
I0705 15:05:50.743855 39530 exec_runner.go:151] cp: /home/hellotalk/.minikube/certs/cert.pem --> /home/hellotalk/.minikube/cert.pem (1131 bytes)
I0705 15:05:50.743899 39530 exec_runner.go:144] found /home/hellotalk/.minikube/key.pem, removing ...
I0705 15:05:50.743901 39530 exec_runner.go:203] rm: /home/hellotalk/.minikube/key.pem
I0705 15:05:50.743921 39530 exec_runner.go:151] cp: /home/hellotalk/.minikube/certs/key.pem --> /home/hellotalk/.minikube/key.pem (1679 bytes)
I0705 15:05:50.743963 39530 provision.go:117] generating server cert: /home/hellotalk/.minikube/machines/server.pem ca-key=/home/hellotalk/.minikube/certs/ca.pem private-key=/home/hellotalk/.minikube/certs/ca-key.pem org=hellotalk.minikube san=[127.0.0.1 192.168.58.2 localhost minikube]
I0705 15:05:50.851161 39530 provision.go:177] copyRemoteCerts
I0705 15:05:50.851202 39530 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0705 15:05:50.851232 39530 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" minikube
I0705 15:05:50.860955 39530 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/hellotalk/.minikube/machines/minikube/id_rsa Username:docker}
I0705 15:05:50.948555 39530 ssh_runner.go:362] scp /home/hellotalk/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1086 bytes)
I0705 15:05:50.964798 39530 ssh_runner.go:362] scp /home/hellotalk/.minikube/machines/server.pem --> /etc/docker/server.pem (1188 bytes)
I0705 15:05:50.980690 39530 ssh_runner.go:362] scp /home/hellotalk/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I0705 15:05:50.996812 39530 provision.go:87] duration metric: took 264.748284ms to configureAuth
I0705 15:05:50.996823 39530 ubuntu.go:193] setting minikube options for container-runtime
I0705 15:05:50.996922 39530 config.go:182] Loaded profile config "minikube": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.30.0
I0705 15:05:50.996962 39530 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" minikube
I0705 15:05:51.006899 39530 main.go:141] libmachine: Using SSH client type: native
I0705 15:05:51.007014 39530 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x82d6e0] 0x830440 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0705 15:05:51.007019 39530 main.go:141] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I0705 15:05:51.123460 39530 main.go:141] libmachine: SSH cmd err, output: <nil>: overlay
I0705 15:05:51.123471 39530 ubuntu.go:71] root file system type: overlay
I0705 15:05:51.123534 39530 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I0705 15:05:51.123584 39530 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" minikube
I0705 15:05:51.133646 39530 main.go:141] libmachine: Using SSH client type: native
I0705 15:05:51.133765 39530 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x82d6e0] 0x830440 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0705 15:05:51.133809 39530 main.go:141] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %!s(MISSING) "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I0705 15:05:51.294209 39530 main.go:141] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
I0705 15:05:51.294407 39530 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" minikube
I0705 15:05:51.306326 39530 main.go:141] libmachine: Using SSH client type: native
I0705 15:05:51.306443 39530 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x82d6e0] 0x830440 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0705 15:05:51.306452 39530 main.go:141] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I0705 15:05:51.426332 39530 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0705 15:05:51.426345 39530 machine.go:97] duration metric: took 4.130507001s to provisionDockerMachine
I0705 15:05:51.426351 39530 start.go:293] postStartSetup for "minikube" (driver="docker")
I0705 15:05:51.426358 39530 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0705 15:05:51.426414 39530 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0705 15:05:51.426451 39530 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" minikube
I0705 15:05:51.437163 39530 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/hellotalk/.minikube/machines/minikube/id_rsa Username:docker}
I0705 15:05:51.531064 39530 ssh_runner.go:195] Run: cat /etc/os-release
I0705 15:05:51.537442 39530 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0705 15:05:51.537485 39530 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0705 15:05:51.537501 39530 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0705 15:05:51.537510 39530 info.go:137] Remote host: Ubuntu 22.04.4 LTS
I0705 15:05:51.537524 39530 filesync.go:126] Scanning /home/hellotalk/.minikube/addons for local assets ...
I0705 15:05:51.537607 39530 filesync.go:126] Scanning /home/hellotalk/.minikube/files for local assets ...
I0705 15:05:51.537641 39530 start.go:296] duration metric: took 111.283267ms for postStartSetup
I0705 15:05:51.537741 39530 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0705 15:05:51.537822 39530 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" minikube
I0705 15:05:51.548585 39530 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/hellotalk/.minikube/machines/minikube/id_rsa Username:docker}
I0705 15:05:51.630635 39530 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0705 15:05:51.633211 39530 fix.go:56] duration metric: took 4.589233477s for fixHost
I0705 15:05:51.633219 39530 start.go:83] releasing machines lock for "minikube", held for 4.589251399s
I0705 15:05:51.633263 39530 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" minikube
I0705 15:05:51.643033 39530 ssh_runner.go:195] Run: cat /version.json
I0705 15:05:51.643067 39530 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" minikube
I0705 15:05:51.643089 39530 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.cn-hangzhou.aliyuncs.com/google_containers/
I0705 15:05:51.643131 39530 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" minikube
I0705 15:05:51.653390 39530 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/hellotalk/.minikube/machines/minikube/id_rsa Username:docker}
I0705 15:05:51.653466 39530 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/hellotalk/.minikube/machines/minikube/id_rsa Username:docker}
I0705 15:05:51.890288 39530 ssh_runner.go:195] Run: systemctl --version
I0705 15:05:51.893379 39530 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0705 15:05:51.896601 39530 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I0705 15:05:51.909486 39530 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I0705 15:05:51.909535 39530 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%!p(MISSING), " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0705 15:05:51.915909 39530 cni.go:259] no active bridge cni configs found in "/etc/cni/net.d" - nothing to disable
I0705 15:05:51.921212 39530 start.go:494] detecting cgroup driver to use...
I0705 15:05:51.921236 39530 detect.go:199] detected "systemd" cgroup driver on host os
I0705 15:05:51.921300 39530 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %!s(MISSING) "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0705 15:05:51.933550 39530 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.9"|' /etc/containerd/config.toml"
I0705 15:05:51.943524 39530 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0705 15:05:51.953626 39530 containerd.go:146] configuring containerd to use "systemd" as cgroup driver...
I0705 15:05:51.953686 39530 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I0705 15:05:51.963967 39530 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0705 15:05:51.972513 39530 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0705 15:05:51.981302 39530 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0705 15:05:51.988377 39530 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0705 15:05:51.994489 39530 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0705 15:05:52.000812 39530 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0705 15:05:52.006985 39530 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I0705 15:05:52.013485 39530 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0705 15:05:52.018908 39530 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0705 15:05:52.024255 39530 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0705 15:05:52.070521 39530 ssh_runner.go:195] Run: sudo systemctl restart containerd
I0705 15:05:52.124422 39530 start.go:494] detecting cgroup driver to use...
I0705 15:05:52.124444 39530 detect.go:199] detected "systemd" cgroup driver on host os
I0705 15:05:52.124494 39530 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I0705 15:05:52.135526 39530 cruntime.go:279] skipping containerd shutdown because we are bound to it
I0705 15:05:52.135585 39530 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0705 15:05:52.143752 39530 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %!s(MISSING) "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I0705 15:05:52.155847 39530 ssh_runner.go:195] Run: which cri-dockerd
I0705 15:05:52.158144 39530 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I0705 15:05:52.164155 39530 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (225 bytes)
I0705 15:05:52.177441 39530 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I0705 15:05:52.225990 39530 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I0705 15:05:52.273173 39530 docker.go:574] configuring docker to use "systemd" as cgroup driver...
I0705 15:05:52.273239 39530 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (129 bytes)
I0705 15:05:52.285749 39530 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0705 15:05:52.332354 39530 ssh_runner.go:195] Run: sudo systemctl restart docker
I0705 15:05:53.159250 39530 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I0705 15:05:53.166868 39530 ssh_runner.go:195] Run: sudo systemctl stop cri-docker.socket
I0705 15:05:53.174507 39530 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0705 15:05:53.181717 39530 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I0705 15:05:53.226925 39530 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I0705 15:05:53.272606 39530 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0705 15:05:53.316958 39530 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I0705 15:05:53.332546 39530 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0705 15:05:53.339981 39530 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0705 15:05:53.384925 39530 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I0705 15:05:53.445055 39530 start.go:541] Will wait 60s for socket path /var/run/cri-dockerd.sock
I0705 15:05:53.445128 39530 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I0705 15:05:53.447445 39530 start.go:562] Will wait 60s for crictl version
I0705 15:05:53.447489 39530 ssh_runner.go:195] Run: which crictl
I0705 15:05:53.449545 39530 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0705 15:05:53.470316 39530 start.go:578] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 26.1.1
RuntimeApiVersion: v1
I0705 15:05:53.470368 39530 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0705 15:05:53.483500 39530 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0705 15:05:53.502430 39530 out.go:204] 🐳 Preparing Kubernetes v1.30.0 on Docker 26.1.1 …
I0705 15:05:53.502504 39530 cli_runner.go:164] Run: docker network inspect minikube --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0705 15:05:53.512299 39530 ssh_runner.go:195] Run: grep 192.168.58.1 host.minikube.internal$ /etc/hosts
I0705 15:05:53.514751 39530 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.58.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0705 15:05:53.521793 39530 kubeadm.go:877] updating cluster {Name:minikube KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:docker.io/kicbase/stable:v0.0.44@sha256:eb04641328b06c5c4a14f4348470e1046bbcf9c2cbc551486e343d3a49db557e Memory:7900 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.30.0 ClusterName:minikube Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository:registry.cn-hangzhou.aliyuncs.com/google_containers LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.58.2 Port:8443 KubernetesVersion:v1.30.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/hellotalk:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I0705 15:05:53.521855 39530 preload.go:132] Checking if preload exists for k8s version v1.30.0 and runtime docker
I0705 15:05:53.521873 39530 preload.go:147] Found local preload: /home/hellotalk/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.30.0-docker-overlay2-amd64.tar.lz4
I0705 15:05:53.521916 39530 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0705 15:05:53.533456 39530 docker.go:685] Got preloaded images: -- stdout --
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.30.0
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.30.0
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.30.0
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.30.0
registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.12-0
costa92/traefik-plugin:2.9.10
costa92/my-blog:local-20231111194616
registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.11.1
istio/proxyv2:1.15.6
istio/pilot:1.15.6
registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.9
registry.cn-hangzhou.aliyuncs.com/google_containers/storage-provisioner:v5
-- /stdout --
I0705 15:05:53.533465 39530 docker.go:615] Images already preloaded, skipping extraction
I0705 15:05:53.533510 39530 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0705 15:05:53.545139 39530 docker.go:685] Got preloaded images: -- stdout --
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.30.0
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.30.0
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.30.0
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.30.0
registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.12-0
costa92/traefik-plugin:2.9.10
costa92/my-blog:local-20231111194616
registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.11.1
istio/proxyv2:1.15.6
istio/pilot:1.15.6
registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.9
registry.cn-hangzhou.aliyuncs.com/google_containers/storage-provisioner:v5
-- /stdout --
I0705 15:05:53.545147 39530 cache_images.go:84] Images are preloaded, skipping loading
I0705 15:05:53.545153 39530 kubeadm.go:928] updating node { 192.168.58.2 8443 v1.30.0 docker true true} ...
I0705 15:05:53.545221 39530 kubeadm.go:940] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.30.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=minikube --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.58.2
[Install]
config:
{KubernetesVersion:v1.30.0 ClusterName:minikube Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository:registry.cn-hangzhou.aliyuncs.com/google_containers LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I0705 15:05:53.545296 39530 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I0705 15:05:53.573046 39530 cni.go:84] Creating CNI manager for ""
I0705 15:05:53.573057 39530 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0705 15:05:53.573062 39530 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
I0705 15:05:53.573076 39530 kubeadm.go:181] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.58.2 APIServerPort:8443 KubernetesVersion:v1.30.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:minikube NodeName:minikube DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository:registry.cn-hangzhou.aliyuncs.com/google_containers ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.58.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.58.2 CgroupDriver:systemd ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I0705 15:05:53.573182 39530 kubeadm.go:187] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.58.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///var/run/cri-dockerd.sock
name: "minikube"
kubeletExtraArgs:
node-ip: 192.168.58.2
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.58.2"]
extraArgs:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.30.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
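In the raw minikube log the three evictionHard values above are printed as "0%!"(MISSING)" rather than "0%" (restored here). That is Go's fmt package at work: the generated YAML is evidently passed through a printf-style logging call, and a literal % followed by the closing quote parses as the verb '"' with no matching argument. A two-line demonstration:

package main

import "fmt"

func main() {
    // The literal % before the closing quote is parsed as the verb '"' with
    // no argument supplied, so fmt substitutes %!"(MISSING) in its place.
    fmt.Printf("nodefs.available: \"0%\"\n")
    // Output: nodefs.available: "0%!"(MISSING)
}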
I0705 15:05:53.573228 39530 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.30.0
I0705 15:05:53.579256 39530 binaries.go:44] Found k8s binaries, skipping transfer
I0705 15:05:53.579302 39530 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I0705 15:05:53.584849 39530 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (307 bytes)
I0705 15:05:53.596547 39530 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0705 15:05:53.608196 39530 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2218 bytes)
I0705 15:05:53.619875 39530 ssh_runner.go:195] Run: grep 192.168.58.2 control-plane.minikube.internal$ /etc/hosts
I0705 15:05:53.621980 39530 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.58.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
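The grep just above checks whether /etc/hosts already maps control-plane.minikube.internal to 192.168.58.2; since it does not, the bash one-liner strips any stale line for that host and appends a fresh mapping. The same idempotent update in Go (a sketch, not minikube's implementation):

package main

import (
    "os"
    "strings"
)

// ensureHostsEntry drops any existing line ending in host and appends a
// fresh "ip<TAB>host" mapping -- the same effect as the bash one-liner in
// the log. Writing the real /etc/hosts requires root.
func ensureHostsEntry(path, ip, host string) error {
    data, err := os.ReadFile(path)
    if err != nil {
        return err
    }
    var kept []string
    for _, line := range strings.Split(strings.TrimRight(string(data), "\n"), "\n") {
        if strings.HasSuffix(line, "\t"+host) || strings.HasSuffix(line, " "+host) {
            continue // stale entry for this host
        }
        kept = append(kept, line)
    }
    kept = append(kept, ip+"\t"+host)
    return os.WriteFile(path, []byte(strings.Join(kept, "\n")+"\n"), 0o644)
}

func main() {
    _ = ensureHostsEntry("/etc/hosts", "192.168.58.2", "control-plane.minikube.internal")
}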
I0705 15:05:53.628837 39530 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0705 15:05:53.675806 39530 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0705 15:05:53.696622 39530 certs.go:68] Setting up /home/hellotalk/.minikube/profiles/minikube for IP: 192.168.58.2
I0705 15:05:53.696630 39530 certs.go:194] generating shared ca certs ...
I0705 15:05:53.696640 39530 certs.go:226] acquiring lock for ca certs: {Name:mk13fbde79f9491c95dda7e6b6ac4e7c3fe829b3 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0705 15:05:53.696723 39530 certs.go:235] skipping valid "minikubeCA" ca cert: /home/hellotalk/.minikube/ca.key
I0705 15:05:53.696747 39530 certs.go:235] skipping valid "proxyClientCA" ca cert: /home/hellotalk/.minikube/proxy-client-ca.key
I0705 15:05:53.696752 39530 certs.go:256] generating profile certs ...
I0705 15:05:53.696804 39530 certs.go:359] skipping valid signed profile cert regeneration for "minikube-user": /home/hellotalk/.minikube/profiles/minikube/client.key
I0705 15:05:53.696829 39530 certs.go:359] skipping valid signed profile cert regeneration for "minikube": /home/hellotalk/.minikube/profiles/minikube/apiserver.key.502bbb95
I0705 15:05:53.696856 39530 certs.go:359] skipping valid signed profile cert regeneration for "aggregator": /home/hellotalk/.minikube/profiles/minikube/proxy-client.key
I0705 15:05:53.696924 39530 certs.go:484] found cert: /home/hellotalk/.minikube/certs/ca-key.pem (1679 bytes)
I0705 15:05:53.696944 39530 certs.go:484] found cert: /home/hellotalk/.minikube/certs/ca.pem (1086 bytes)
I0705 15:05:53.696959 39530 certs.go:484] found cert: /home/hellotalk/.minikube/certs/cert.pem (1131 bytes)
I0705 15:05:53.696972 39530 certs.go:484] found cert: /home/hellotalk/.minikube/certs/key.pem (1679 bytes)
I0705 15:05:53.697376 39530 ssh_runner.go:362] scp /home/hellotalk/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0705 15:05:53.715478 39530 ssh_runner.go:362] scp /home/hellotalk/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I0705 15:05:53.733161 39530 ssh_runner.go:362] scp /home/hellotalk/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0705 15:05:53.754060 39530 ssh_runner.go:362] scp /home/hellotalk/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I0705 15:05:53.773287 39530 ssh_runner.go:362] scp /home/hellotalk/.minikube/profiles/minikube/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1411 bytes)
I0705 15:05:53.793406 39530 ssh_runner.go:362] scp /home/hellotalk/.minikube/profiles/minikube/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I0705 15:05:53.811803 39530 ssh_runner.go:362] scp /home/hellotalk/.minikube/profiles/minikube/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0705 15:05:53.829222 39530 ssh_runner.go:362] scp /home/hellotalk/.minikube/profiles/minikube/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I0705 15:05:53.845638 39530 ssh_runner.go:362] scp /home/hellotalk/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0705 15:05:53.863845 39530 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I0705 15:05:53.876211 39530 ssh_runner.go:195] Run: openssl version
I0705 15:05:53.880044 39530 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0705 15:05:53.887654 39530 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0705 15:05:53.889869 39530 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Jul 5 01:28 /usr/share/ca-certificates/minikubeCA.pem
I0705 15:05:53.889898 39530 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0705 15:05:53.894232 39530 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I0705 15:05:53.900981 39530 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0705 15:05:53.903690 39530 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-etcd-client.crt -checkend 86400
I0705 15:05:53.908353 39530 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-kubelet-client.crt -checkend 86400
I0705 15:05:53.912831 39530 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/server.crt -checkend 86400
I0705 15:05:53.917206 39530 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/healthcheck-client.crt -checkend 86400
I0705 15:05:53.922021 39530 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/peer.crt -checkend 86400
I0705 15:05:53.926564 39530 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/front-proxy-client.crt -checkend 86400
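The run of "openssl x509 -noout -in ... -checkend 86400" commands above asks, for each control-plane certificate, whether it will still be valid 86400 seconds (24 hours) from now; a failing check would trigger regeneration. The equivalent test with Go's crypto/x509, as a sketch:

package main

import (
    "crypto/x509"
    "encoding/pem"
    "fmt"
    "os"
    "time"
)

// expiresWithin reports whether the PEM certificate at path expires within d,
// the same question `openssl x509 -checkend` answers.
func expiresWithin(path string, d time.Duration) (bool, error) {
    data, err := os.ReadFile(path)
    if err != nil {
        return false, err
    }
    block, _ := pem.Decode(data)
    if block == nil {
        return false, fmt.Errorf("no PEM block in %s", path)
    }
    cert, err := x509.ParseCertificate(block.Bytes)
    if err != nil {
        return false, err
    }
    return time.Now().Add(d).After(cert.NotAfter), nil
}

func main() {
    soon, err := expiresWithin("/var/lib/minikube/certs/apiserver-kubelet-client.crt", 24*time.Hour)
    fmt.Println(soon, err)
}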
I0705 15:05:53.930740 39530 kubeadm.go:391] StartCluster: {Name:minikube KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:docker.io/kicbase/stable:v0.0.44@sha256:eb04641328b06c5c4a14f4348470e1046bbcf9c2cbc551486e343d3a49db557e Memory:7900 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.30.0 ClusterName:minikube Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository:registry.cn-hangzhou.aliyuncs.com/google_containers LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.58.2 Port:8443 KubernetesVersion:v1.30.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/hellotalk:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0705 15:05:53.930814 39530 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I0705 15:05:53.942378 39530 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
W0705 15:05:53.948308 39530 kubeadm.go:404] apiserver tunnel failed: apiserver port not set
I0705 15:05:53.948314 39530 kubeadm.go:407] found existing configuration files, will attempt cluster restart
I0705 15:05:53.948322 39530 kubeadm.go:587] restartPrimaryControlPlane start ...
I0705 15:05:53.948355 39530 ssh_runner.go:195] Run: sudo test -d /data/minikube
I0705 15:05:53.954722 39530 kubeadm.go:129] /data/minikube skipping compat symlinks: sudo test -d /data/minikube: Process exited with status 1
stdout:
stderr:
I0705 15:05:53.955443 39530 kubeconfig.go:125] found "minikube" server: "https://192.168.58.2:8443"
I0705 15:05:53.957231 39530 ssh_runner.go:195] Run: sudo diff -u /var/tmp/minikube/kubeadm.yaml /var/tmp/minikube/kubeadm.yaml.new
I0705 15:05:53.963604 39530 kubeadm.go:624] The running cluster does not require reconfiguration: 192.168.58.2
I0705 15:05:53.963620 39530 kubeadm.go:591] duration metric: took 15.294444ms to restartPrimaryControlPlane
I0705 15:05:53.963625 39530 kubeadm.go:393] duration metric: took 32.889803ms to StartCluster
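minikube decides whether the control plane actually needs reconfiguring by diffing the kubeadm.yaml it last applied against the freshly rendered kubeadm.yaml.new (the "sudo diff -u" above); an empty diff lets it restart the existing cluster without rerunning kubeadm init. A sketch of that decision, relying on diff's exit codes (0 = identical, 1 = different):

package main

import (
    "fmt"
    "os/exec"
)

// needsReconfig reports whether the rendered config differs from the one the
// cluster was last started with.
func needsReconfig(oldPath, newPath string) (bool, error) {
    err := exec.Command("diff", "-u", oldPath, newPath).Run()
    if err == nil {
        return false, nil // files match, restart as-is
    }
    if ee, ok := err.(*exec.ExitError); ok && ee.ExitCode() == 1 {
        return true, nil // files differ, reconfigure
    }
    return false, fmt.Errorf("diff failed: %v", err)
}

func main() {
    fmt.Println(needsReconfig("/var/tmp/minikube/kubeadm.yaml", "/var/tmp/minikube/kubeadm.yaml.new"))
}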
I0705 15:05:53.963634 39530 settings.go:142] acquiring lock: {Name:mk9f31ba78da5a548d61d66cdfeef138d1422ea5 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0705 15:05:53.963666 39530 settings.go:150] Updating kubeconfig: /home/hellotalk/.kube/config
I0705 15:05:53.964682 39530 lock.go:35] WriteFile acquiring /home/hellotalk/.kube/config: {Name:mkef2c36c5d89913cd310795ec667b46dc1413d0 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0705 15:05:53.964840 39530 start.go:234] Will wait 6m0s for node &{Name: IP:192.168.58.2 Port:8443 KubernetesVersion:v1.30.0 ContainerRuntime:docker ControlPlane:true Worker:true}
I0705 15:05:53.970338 39530 out.go:177] 🔎 Verifying Kubernetes components...
I0705 15:05:53.964874 39530 addons.go:502] enable addons start: toEnable=map[ambassador:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false helm-tiller:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:false volumesnapshots:false yakd:false]
I0705 15:05:53.964979 39530 config.go:182] Loaded profile config "minikube": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.30.0
I0705 15:05:53.981169 39530 addons.go:69] Setting storage-provisioner=true in profile "minikube"
I0705 15:05:53.981172 39530 addons.go:69] Setting default-storageclass=true in profile "minikube"
I0705 15:05:53.981192 39530 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0705 15:05:53.981192 39530 addons.go:234] Setting addon storage-provisioner=true in "minikube"
I0705 15:05:53.981196 39530 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "minikube"
W0705 15:05:53.981199 39530 addons.go:243] addon storage-provisioner should already be in state true
I0705 15:05:53.981225 39530 host.go:66] Checking if "minikube" exists ...
I0705 15:05:53.981391 39530 cli_runner.go:164] Run: docker container inspect minikube --format={{.State.Status}}
I0705 15:05:53.981458 39530 cli_runner.go:164] Run: docker container inspect minikube --format={{.State.Status}}
I0705 15:05:53.993436 39530 addons.go:234] Setting addon default-storageclass=true in "minikube"
I0705 15:05:53.997968 39530 out.go:177] ▪ Using image registry.cn-hangzhou.aliyuncs.com/google_containers/storage-provisioner:v5
W0705 15:05:53.997973 39530 addons.go:243] addon default-storageclass should already be in state true
I0705 15:05:53.997999 39530 host.go:66] Checking if "minikube" exists ...
I0705 15:05:54.003644 39530 addons.go:426] installing /etc/kubernetes/addons/storage-provisioner.yaml
I0705 15:05:54.003654 39530 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2708 bytes)
I0705 15:05:54.003716 39530 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" minikube
I0705 15:05:54.004031 39530 cli_runner.go:164] Run: docker container inspect minikube --format={{.State.Status}}
I0705 15:05:54.014775 39530 addons.go:426] installing /etc/kubernetes/addons/storageclass.yaml
I0705 15:05:54.014782 39530 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I0705 15:05:54.014838 39530 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" minikube
I0705 15:05:54.014851 39530 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/hellotalk/.minikube/machines/minikube/id_rsa Username:docker}
I0705 15:05:54.025631 39530 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/hellotalk/.minikube/machines/minikube/id_rsa Username:docker}
I0705 15:05:54.057425 39530 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0705 15:05:54.066722 39530 api_server.go:52] waiting for apiserver process to appear ...
I0705 15:05:54.066781 39530 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0705 15:05:54.106530 39530 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I0705 15:05:54.116015 39530 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
W0705 15:05:54.144640 39530 addons.go:452] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
stdout:
stderr:
error: error validating "/etc/kubernetes/addons/storage-provisioner.yaml": error validating data: failed to download openapi: Get "https://localhost:8443/openapi/v2?timeout=32s": dial tcp [::1]:8443: connect: connection refused; if you choose to ignore these errors, turn validation off with --validate=false
I0705 15:05:54.144654 39530 retry.go:31] will retry after 301.355159ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
stdout:
stderr:
error: error validating "/etc/kubernetes/addons/storage-provisioner.yaml": error validating data: failed to download openapi: Get "https://localhost:8443/openapi/v2?timeout=32s": dial tcp [::1]:8443: connect: connection refused; if you choose to ignore these errors, turn validation off with --validate=false
W0705 15:05:54.154247 39530 addons.go:452] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
stdout:
stderr:
error: error validating "/etc/kubernetes/addons/storageclass.yaml": error validating data: failed to download openapi: Get "https://localhost:8443/openapi/v2?timeout=32s": dial tcp [::1]:8443: connect: connection refused; if you choose to ignore these errors, turn validation off with --validate=false
I0705 15:05:54.154263 39530 retry.go:31] will retry after 261.844435ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
stdout:
stderr:
error: error validating "/etc/kubernetes/addons/storageclass.yaml": error validating data: failed to download openapi: Get "https://localhost:8443/openapi/v2?timeout=32s": dial tcp [::1]:8443: connect: connection refused; if you choose to ignore these errors, turn validation off with --validate=false
I0705 15:05:54.416673 39530 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml
I0705 15:05:54.446499 39530 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml
W0705 15:05:54.458433 39530 addons.go:452] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
stdout:
stderr:
error: error validating "/etc/kubernetes/addons/storageclass.yaml": error validating data: failed to download openapi: Get "https://localhost:8443/openapi/v2?timeout=32s": dial tcp [::1]:8443: connect: connection refused; if you choose to ignore these errors, turn validation off with --validate=false
I0705 15:05:54.458446 39530 retry.go:31] will retry after 245.39495ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml: Process exited with status 1
stdout:
stderr:
error: error validating "/etc/kubernetes/addons/storageclass.yaml": error validating data: failed to download openapi: Get "https://localhost:8443/openapi/v2?timeout=32s": dial tcp [::1]:8443: connect: connection refused; if you choose to ignore these errors, turn validation off with --validate=false
W0705 15:05:54.487361 39530 addons.go:452] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
stdout:
stderr:
error: error validating "/etc/kubernetes/addons/storage-provisioner.yaml": error validating data: failed to download openapi: Get "https://localhost:8443/openapi/v2?timeout=32s": dial tcp [::1]:8443: connect: connection refused; if you choose to ignore these errors, turn validation off with --validate=false
I0705 15:05:54.487374 39530 retry.go:31] will retry after 450.225233ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: Process exited with status 1
stdout:
stderr:
error: error validating "/etc/kubernetes/addons/storage-provisioner.yaml": error validating data: failed to download openapi: Get "https://localhost:8443/openapi/v2?timeout=32s": dial tcp [::1]:8443: connect: connection refused; if you choose to ignore these errors, turn validation off with --validate=false
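Each failed apply above is retried after a short randomized delay (301ms, 261ms, 245ms, 450ms) because the apiserver is still refusing connections on localhost:8443 while it restarts. A generic retry loop with jittered backoff in the same spirit (a sketch; minikube's retry.go applies its own policy):

package main

import (
    "fmt"
    "math/rand"
    "time"
)

// retry runs fn up to attempts times, sleeping a jittered delay between
// failures, roughly mirroring the "will retry after ..." lines in the log.
func retry(attempts int, base time.Duration, fn func() error) error {
    var err error
    for i := 0; i < attempts; i++ {
        if err = fn(); err == nil {
            return nil
        }
        d := base + time.Duration(rand.Int63n(int64(base)))
        fmt.Printf("will retry after %v: %v\n", d, err)
        time.Sleep(d)
    }
    return err
}

func main() {
    _ = retry(3, 250*time.Millisecond, func() error {
        return fmt.Errorf("connection refused")
    })
}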
I0705 15:05:54.567559 39530 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0705 15:05:54.704359 39530 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml
I0705 15:05:54.938544 39530 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml
I0705 15:05:55.067545 39530 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0705 15:05:55.985692 39530 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.0/kubectl apply --force -f /etc/kubernetes/addons/storageclass.yaml: (1.281317328s)
I0705 15:05:56.286991 39530 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.0/kubectl apply --force -f /etc/kubernetes/addons/storage-provisioner.yaml: (1.348427852s)
I0705 15:05:56.287026 39530 ssh_runner.go:235] Completed: sudo pgrep -xnf kube-apiserver.*minikube.*: (1.219468541s)
I0705 15:05:56.287036 39530 api_server.go:72] duration metric: took 2.32218398s to wait for apiserver process to appear ...
I0705 15:05:56.287040 39530 api_server.go:88] waiting for apiserver healthz status ...
I0705 15:05:56.287054 39530 api_server.go:253] Checking apiserver healthz at https://192.168.58.2:8443/healthz ...
I0705 15:05:56.289534 39530 api_server.go:279] https://192.168.58.2:8443/healthz returned 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-status-available-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
I0705 15:05:56.305313 39530 out.go:177] 🌟 Enabled addons: default-storageclass, storage-provisioner
I0705 15:05:56.310635 39530 addons.go:505] duration metric: took 2.345761936s for enable addons: enabled=[default-storageclass storage-provisioner]
W0705 15:05:56.305329 39530 api_server.go:103] status: https://192.168.58.2:8443/healthz returned error 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-status-available-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
I0705 15:05:56.787689 39530 api_server.go:253] Checking apiserver healthz at https://192.168.58.2:8443/healthz ...
I0705 15:05:56.790327 39530 api_server.go:279] https://192.168.58.2:8443/healthz returned 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-status-available-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
W0705 15:05:56.790338 39530 api_server.go:103] status: https://192.168.58.2:8443/healthz returned error 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-status-available-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
I0705 15:05:57.288080 39530 api_server.go:253] Checking apiserver healthz at https://192.168.58.2:8443/healthz ...
I0705 15:05:57.290550 39530 api_server.go:279] https://192.168.58.2:8443/healthz returned 200:
ok
I0705 15:05:57.291040 39530 api_server.go:141] control plane version: v1.30.0
I0705 15:05:57.291047 39530 api_server.go:131] duration metric: took 1.004003808s to wait for apiserver health ...
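The [-] lines in the 500 responses above pinpoint exactly which post-start hooks (rbac/bootstrap-roles and scheduling/bootstrap-system-priority-classes) were still pending; minikube simply re-polls /healthz about every half second until it answers 200. A minimal poller, as a sketch (a real client would trust the cluster CA instead of skipping verification):

package main

import (
    "crypto/tls"
    "fmt"
    "net/http"
    "time"
)

// waitHealthz polls url until it returns 200 or the deadline passes.
func waitHealthz(url string, timeout time.Duration) error {
    client := &http.Client{
        Timeout: 2 * time.Second,
        // Verification is skipped only because this sketch carries no CA
        // bundle; do not do this against a server you don't control.
        Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
    }
    deadline := time.Now().Add(timeout)
    for time.Now().Before(deadline) {
        resp, err := client.Get(url)
        if err == nil {
            resp.Body.Close()
            if resp.StatusCode == http.StatusOK {
                return nil
            }
        }
        time.Sleep(500 * time.Millisecond)
    }
    return fmt.Errorf("%s not healthy after %v", url, timeout)
}

func main() {
    fmt.Println(waitHealthz("https://192.168.58.2:8443/healthz", time.Minute))
}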
I0705 15:05:57.291052 39530 system_pods.go:43] waiting for kube-system pods to appear ...
I0705 15:05:57.295395 39530 system_pods.go:59] 7 kube-system pods found
I0705 15:05:57.295408 39530 system_pods.go:61] "coredns-7c445c467-g79kg" [aa59004b-3cbd-4ff5-bde6-d4f26aea71bc] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0705 15:05:57.295413 39530 system_pods.go:61] "etcd-minikube" [bbddfc00-09a2-4f17-8339-13d9757d182d] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I0705 15:05:57.295417 39530 system_pods.go:61] "kube-apiserver-minikube" [4f8ce07b-3f00-4415-ba4f-43386508ebd1] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I0705 15:05:57.295421 39530 system_pods.go:61] "kube-controller-manager-minikube" [c590ecb6-3885-48b4-8c6d-2f61f4cc8184] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
I0705 15:05:57.295424 39530 system_pods.go:61] "kube-proxy-nx76r" [84bf6f89-6056-433e-8d73-a7b1bc0179d7] Running / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy])
I0705 15:05:57.295427 39530 system_pods.go:61] "kube-scheduler-minikube" [d8952b3e-a5a0-495c-b79a-f3e5b897fcc5] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I0705 15:05:57.295429 39530 system_pods.go:61] "storage-provisioner" [ae37d88d-9905-4974-8e4d-04f44e84dcba] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0705 15:05:57.295434 39530 system_pods.go:74] duration metric: took 4.378238ms to wait for pod list to return data ...
I0705 15:05:57.295441 39530 kubeadm.go:576] duration metric: took 3.330587988s to wait for: map[apiserver:true system_pods:true]
I0705 15:05:57.295448 39530 node_conditions.go:102] verifying NodePressure condition ...
I0705 15:05:57.297209 39530 node_conditions.go:122] node storage ephemeral capacity is 490616760Ki
I0705 15:05:57.297219 39530 node_conditions.go:123] node cpu capacity is 12
I0705 15:05:57.297225 39530 node_conditions.go:105] duration metric: took 1.774525ms to run NodePressure ...
I0705 15:05:57.297231 39530 start.go:240] waiting for startup goroutines ...
I0705 15:05:57.297235 39530 start.go:245] waiting for cluster config update ...
I0705 15:05:57.297242 39530 start.go:254] writing updated cluster config ...
I0705 15:05:57.297395 39530 ssh_runner.go:195] Run: rm -f paused
I0705 15:05:57.332755 39530 start.go:600] kubectl: 1.30.0, cluster: 1.30.0 (minor skew: 0)
I0705 15:05:57.337559 39530 out.go:177] 🏄 Done! kubectl is now configured to use "minikube" cluster and "default" namespace by default
==> Docker <==
Jul 08 04:18:41 minikube dockerd[941]: 2024/07/08 04:18:41 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 04:18:41 minikube dockerd[941]: 2024/07/08 04:18:41 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 04:18:41 minikube dockerd[941]: 2024/07/08 04:18:41 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 04:18:41 minikube dockerd[941]: 2024/07/08 04:18:41 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 04:18:41 minikube dockerd[941]: 2024/07/08 04:18:41 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 04:18:41 minikube dockerd[941]: 2024/07/08 04:18:41 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 04:18:41 minikube dockerd[941]: 2024/07/08 04:18:41 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 04:18:41 minikube dockerd[941]: 2024/07/08 04:18:41 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 04:18:41 minikube dockerd[941]: 2024/07/08 04:18:41 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 04:18:41 minikube dockerd[941]: 2024/07/08 04:18:41 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 04:18:41 minikube dockerd[941]: 2024/07/08 04:18:41 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 04:34:12 minikube dockerd[941]: 2024/07/08 04:34:12 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 04:34:12 minikube dockerd[941]: 2024/07/08 04:34:12 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 04:34:12 minikube dockerd[941]: 2024/07/08 04:34:12 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 04:34:12 minikube dockerd[941]: 2024/07/08 04:34:12 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 04:34:13 minikube dockerd[941]: 2024/07/08 04:34:13 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 04:34:13 minikube dockerd[941]: 2024/07/08 04:34:13 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 04:34:13 minikube dockerd[941]: 2024/07/08 04:34:13 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 04:34:13 minikube dockerd[941]: 2024/07/08 04:34:13 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 04:34:13 minikube dockerd[941]: 2024/07/08 04:34:13 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 04:34:13 minikube dockerd[941]: 2024/07/08 04:34:13 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 04:34:13 minikube dockerd[941]: 2024/07/08 04:34:13 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 04:34:13 minikube dockerd[941]: 2024/07/08 04:34:13 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 04:34:13 minikube dockerd[941]: 2024/07/08 04:34:13 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 04:34:13 minikube dockerd[941]: 2024/07/08 04:34:13 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 04:34:28 minikube dockerd[941]: 2024/07/08 04:34:28 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 04:34:28 minikube dockerd[941]: 2024/07/08 04:34:28 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 04:34:29 minikube dockerd[941]: 2024/07/08 04:34:29 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 04:34:29 minikube dockerd[941]: 2024/07/08 04:34:29 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 04:34:29 minikube dockerd[941]: 2024/07/08 04:34:29 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 04:34:29 minikube dockerd[941]: 2024/07/08 04:34:29 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 04:34:29 minikube dockerd[941]: 2024/07/08 04:34:29 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 04:34:29 minikube dockerd[941]: 2024/07/08 04:34:29 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 04:34:29 minikube dockerd[941]: 2024/07/08 04:34:29 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 04:34:29 minikube dockerd[941]: 2024/07/08 04:34:29 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 04:34:29 minikube dockerd[941]: 2024/07/08 04:34:29 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 04:34:29 minikube dockerd[941]: 2024/07/08 04:34:29 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 04:34:29 minikube dockerd[941]: 2024/07/08 04:34:29 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 04:34:29 minikube dockerd[941]: 2024/07/08 04:34:29 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 06:10:45 minikube dockerd[941]: 2024/07/08 06:10:45 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 06:10:45 minikube dockerd[941]: 2024/07/08 06:10:45 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 06:10:46 minikube dockerd[941]: 2024/07/08 06:10:46 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 06:10:46 minikube dockerd[941]: 2024/07/08 06:10:46 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 06:10:46 minikube dockerd[941]: 2024/07/08 06:10:46 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 06:10:46 minikube dockerd[941]: 2024/07/08 06:10:46 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 06:10:46 minikube dockerd[941]: 2024/07/08 06:10:46 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 06:10:46 minikube dockerd[941]: 2024/07/08 06:10:46 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 06:10:46 minikube dockerd[941]: 2024/07/08 06:10:46 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 06:10:46 minikube dockerd[941]: 2024/07/08 06:10:46 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 06:10:46 minikube dockerd[941]: 2024/07/08 06:10:46 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 06:10:46 minikube dockerd[941]: 2024/07/08 06:10:46 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 06:10:46 minikube dockerd[941]: 2024/07/08 06:10:46 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 06:10:46 minikube dockerd[941]: 2024/07/08 06:10:46 http: superfluous response.WriteHeader call from go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.(*respWriterWrapper).WriteHeader (wrap.go:98)
Jul 08 06:49:08 minikube dockerd[941]: time="2024-07-08T06:49:08.874718622Z" level=info msg="ignoring event" container=5f701fef7925ad6c139f45592c637ff2b98df468660bd17c3408d7b21f9b0f08 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Jul 08 06:49:08 minikube dockerd[941]: time="2024-07-08T06:49:08.931623194Z" level=info msg="ignoring event" container=639984f80550e76dc879d3dcc966b44863d2c288ef3ac6ca67a1764b367b1c6c module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Jul 08 06:49:09 minikube cri-dockerd[1202]: time="2024-07-08T06:49:09Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/d9feeff1448d69aae9947636ed90a1dc21eb8fdb0f585902ec78957c52f59b9f/resolv.conf as [nameserver 192.168.58.1 options edns0 trust-ad ndots:0]"
Jul 08 06:49:12 minikube cri-dockerd[1202]: time="2024-07-08T06:49:12Z" level=error msg="error getting RW layer size for container ID 'b7615fc7a1b9495c88508f12983858fa9ae9d16113c602daad95ad5dea12839e': Error response from daemon: No such container: b7615fc7a1b9495c88508f12983858fa9ae9d16113c602daad95ad5dea12839e"
Jul 08 06:49:12 minikube cri-dockerd[1202]: time="2024-07-08T06:49:12Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'b7615fc7a1b9495c88508f12983858fa9ae9d16113c602daad95ad5dea12839e'"
Jul 08 06:49:12 minikube cri-dockerd[1202]: time="2024-07-08T06:49:12Z" level=error msg="error getting RW layer size for container ID '5f701fef7925ad6c139f45592c637ff2b98df468660bd17c3408d7b21f9b0f08': Error response from daemon: No such container: 5f701fef7925ad6c139f45592c637ff2b98df468660bd17c3408d7b21f9b0f08"
Jul 08 06:49:12 minikube cri-dockerd[1202]: time="2024-07-08T06:49:12Z" level=error msg="Set backoffDuration to : 1m0s for container ID '5f701fef7925ad6c139f45592c637ff2b98df468660bd17c3408d7b21f9b0f08'"
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD
e474f8be7678f a0bf559e280cf 3 minutes ago Running kube-proxy 0 d9feeff1448d6 kube-proxy-r82ck
e28ef531d87ef prom/prometheus@sha256:15ccbb1cec5fad2cd9f20f574ba5a4dd4160e8472213c76faac17f6481cb6a75 3 hours ago Running prometheus 0 72b881692fbc6 prometheus-cdbc74675-qk5x9
14bd235c81f53 9beeba249f3ee 2 days ago Running server 0 90ebee0138f26 sammy-app-dev-79d5c7f686-drvhg
7946fadd3dfe5 6e38f40d628db 3 days ago Running storage-provisioner 5 6806aa93c174c storage-provisioner
ff442c0870982 6de25f09e41b3 3 days ago Running server 2 70ae7abd343bc blog-web-dev-5db6d8554b-7dn4g
c982c09b63bd3 89a3c9d82f7aa 3 days ago Running traefik 2 e38001b3966a5 traefik-deployment-9f49c9d87-5z4kx
3fe399b562c80 2abb8b4241bb1 3 days ago Running istio-proxy 2 9959607d49f04 istio-egressgateway-77d7544cf7-jch6b
4571ae42f4ac9 2abb8b4241bb1 3 days ago Running istio-proxy 2 493aa5cc036f7 istio-ingressgateway-ff47dcc89-mjj92
7fe4a0ae6607a cf42fa92eee0d 3 days ago Running discovery 2 f4cac25f56930 istiod-7c948c8756-4mcdl
5c627330e574e 6e38f40d628db 3 days ago Exited storage-provisioner 4 6806aa93c174c storage-provisioner
41975226fa7f2 cbb01a7bd410d 3 days ago Running coredns 2 de7b0a32c65cc coredns-7c445c467-g79kg
cf4d19cdbdb9b 3861cfcd7c04c 3 days ago Running etcd 2 94fe84a2c0173 etcd-minikube
6765934f60a26 c7aad43836fa5 3 days ago Running kube-controller-manager 2 b14e623e11835 kube-controller-manager-minikube
88f69a23f2def c42f13656d0b2 3 days ago Running kube-apiserver 2 069994915ee2c kube-apiserver-minikube
416e9dc66185d 259c8277fcbbc 3 days ago Running kube-scheduler 2 49f50121ca708 kube-scheduler-minikube
80f8a9d8e59dc 89a3c9d82f7aa 3 days ago Exited traefik 1 8dc37659bfd7f traefik-deployment-9f49c9d87-5z4kx
3becec6102d61 6de25f09e41b3 3 days ago Exited server 1 abfaff73e820b blog-web-dev-5db6d8554b-7dn4g
84928e1aee7c2 cbb01a7bd410d 3 days ago Exited coredns 1 d0582644f9fe9 coredns-7c445c467-g79kg
d1623cd7dba43 2abb8b4241bb1 3 days ago Exited istio-proxy 1 78cee6a02f19f istio-egressgateway-77d7544cf7-jch6b
7c56b45b6282f cf42fa92eee0d 3 days ago Exited discovery 1 4861de0d028bb istiod-7c948c8756-4mcdl
9ffe2023ed242 2abb8b4241bb1 3 days ago Exited istio-proxy 1 358dd1bea6fe6 istio-ingressgateway-ff47dcc89-mjj92
b24dd1a5dfbcd 259c8277fcbbc 3 days ago Exited kube-scheduler 1 28ef00e5f1ad6 kube-scheduler-minikube
5902080ba6469 c7aad43836fa5 3 days ago Exited kube-controller-manager 1 08bf6a82fa8da kube-controller-manager-minikube
146fa6240acde 3861cfcd7c04c 3 days ago Exited etcd 1 917f765a96e69 etcd-minikube
bed85e4092426 c42f13656d0b2 3 days ago Exited kube-apiserver 1 009aa3cc23c03 kube-apiserver-minikube
==> coredns [41975226fa7f] <==
[INFO] 10.244.0.19:35221 - 39162 "A IN zipkin.istio-system.svc.cluster.local. udp 55 false 512" NXDOMAIN qr,aa,rd 148 0.000084262s
[INFO] 10.244.0.17:34997 - 28814 "A IN zipkin.istio-system.svc.cluster.local. udp 55 false 512" NXDOMAIN qr,aa,rd 148 0.000058412s
[INFO] 10.244.0.19:35221 - 28606 "A IN zipkin.istio-system.cluster.local. udp 51 false 512" NXDOMAIN qr,aa,rd 144 0.000068771s
[INFO] 10.244.0.17:34997 - 43730 "A IN zipkin.istio-system.cluster.local. udp 51 false 512" NXDOMAIN qr,aa,rd 144 0.000055231s
[INFO] 10.244.0.17:34997 - 29685 "A IN zipkin.istio-system. udp 37 false 512" NXDOMAIN qr,rd,ra 37 0.000840776s
[INFO] 10.244.0.19:35221 - 23715 "A IN zipkin.istio-system. udp 37 false 512" NXDOMAIN qr,rd,ra 37 0.00091375s
[INFO] 10.244.0.17:49246 - 19382 "A IN zipkin.istio-system.istio-system.svc.cluster.local. udp 68 false 512" NXDOMAIN qr,aa,rd 161 0.000103147s
[INFO] 10.244.0.17:49246 - 39744 "A IN zipkin.istio-system.svc.cluster.local. udp 55 false 512" NXDOMAIN qr,aa,rd 148 0.000068044s
[INFO] 10.244.0.17:49246 - 32501 "A IN zipkin.istio-system.cluster.local. udp 51 false 512" NXDOMAIN qr,aa,rd 144 0.000050478s
[INFO] 10.244.0.19:44904 - 40745 "A IN zipkin.istio-system.istio-system.svc.cluster.local. udp 68 false 512" NXDOMAIN qr,aa,rd 161 0.000028871s
[INFO] 10.244.0.19:44904 - 26518 "A IN zipkin.istio-system.svc.cluster.local. udp 55 false 512" NXDOMAIN qr,aa,rd 148 0.00003401s
[INFO] 10.244.0.19:44904 - 63123 "A IN zipkin.istio-system.cluster.local. udp 51 false 512" NXDOMAIN qr,aa,rd 144 0.000024184s
[INFO] 10.244.0.19:44904 - 22951 "A IN zipkin.istio-system. udp 37 false 512" NXDOMAIN qr,rd,ra 37 0.00038701s
[INFO] 10.244.0.17:49246 - 55124 "A IN zipkin.istio-system. udp 37 false 512" NXDOMAIN qr,rd,ra 37 0.000865971s
[INFO] 10.244.0.17:55707 - 39257 "A IN zipkin.istio-system.istio-system.svc.cluster.local. udp 68 false 512" NXDOMAIN qr,aa,rd 161 0.000134722s
[INFO] 10.244.0.19:41615 - 56317 "A IN zipkin.istio-system.istio-system.svc.cluster.local. udp 68 false 512" NXDOMAIN qr,aa,rd 161 0.000088463s
[INFO] 10.244.0.17:55707 - 15760 "A IN zipkin.istio-system.svc.cluster.local. udp 55 false 512" NXDOMAIN qr,aa,rd 148 0.00008855s
[INFO] 10.244.0.19:41615 - 28405 "A IN zipkin.istio-system.svc.cluster.local. udp 55 false 512" NXDOMAIN qr,aa,rd 148 0.000072897s
[INFO] 10.244.0.17:55707 - 51244 "A IN zipkin.istio-system.cluster.local. udp 51 false 512" NXDOMAIN qr,aa,rd 144 0.000054784s
[INFO] 10.244.0.19:41615 - 57960 "A IN zipkin.istio-system.cluster.local. udp 51 false 512" NXDOMAIN qr,aa,rd 144 0.000068401s
[INFO] 10.244.0.17:55707 - 9771 "A IN zipkin.istio-system. udp 37 false 512" NXDOMAIN qr,rd,ra 37 0.001151275s
[INFO] 10.244.0.19:41615 - 24526 "A IN zipkin.istio-system. udp 37 false 512" NXDOMAIN qr,rd,ra 37 0.001177489s
[INFO] 10.244.0.17:54490 - 56360 "A IN zipkin.istio-system.istio-system.svc.cluster.local. udp 68 false 512" NXDOMAIN qr,aa,rd 161 0.000125813s
[INFO] 10.244.0.17:54490 - 28503 "A IN zipkin.istio-system.svc.cluster.local. udp 55 false 512" NXDOMAIN qr,aa,rd 148 0.000095302s
[INFO] 10.244.0.19:43241 - 29269 "A IN zipkin.istio-system.istio-system.svc.cluster.local. udp 68 false 512" NXDOMAIN qr,aa,rd 161 0.000063672s
[INFO] 10.244.0.19:43241 - 52811 "A IN zipkin.istio-system.svc.cluster.local. udp 55 false 512" NXDOMAIN qr,aa,rd 148 0.000028603s
[INFO] 10.244.0.17:54490 - 44447 "A IN zipkin.istio-system.cluster.local. udp 51 false 512" NXDOMAIN qr,aa,rd 144 0.000081085s
[INFO] 10.244.0.19:43241 - 39094 "A IN zipkin.istio-system.cluster.local. udp 51 false 512" NXDOMAIN qr,aa,rd 144 0.000036961s
[INFO] 10.244.0.17:54490 - 20184 "A IN zipkin.istio-system. udp 37 false 512" NXDOMAIN qr,rd,ra 37 0.000936228s
[INFO] 10.244.0.19:43241 - 28309 "A IN zipkin.istio-system. udp 37 false 512" NXDOMAIN qr,rd,ra 37 0.000932826s
[INFO] 10.244.0.17:57881 - 64469 "AAAA IN istiod.istio-system.svc.istio-system.svc.cluster.local. udp 83 false 1232" NXDOMAIN qr,aa,rd 165 0.000127343s
[INFO] 10.244.0.17:33039 - 64386 "A IN istiod.istio-system.svc.istio-system.svc.cluster.local. udp 83 false 1232" NXDOMAIN qr,aa,rd 165 0.00016931s
[INFO] 10.244.0.17:55622 - 16449 "A IN istiod.istio-system.svc.svc.cluster.local. udp 70 false 1232" NXDOMAIN qr,aa,rd 152 0.000060227s
[INFO] 10.244.0.17:36112 - 52639 "AAAA IN istiod.istio-system.svc.svc.cluster.local. udp 70 false 1232" NXDOMAIN qr,aa,rd 152 0.000085346s
[INFO] 10.244.0.17:38650 - 23687 "A IN istiod.istio-system.svc.cluster.local. udp 66 false 1232" NOERROR qr,aa,rd 108 0.000066686s
[INFO] 10.244.0.17:46028 - 42926 "AAAA IN istiod.istio-system.svc.cluster.local. udp 66 false 1232" NOERROR qr,aa,rd 148 0.000099131s
[INFO] 10.244.0.19:57939 - 57262 "A IN zipkin.istio-system.istio-system.svc.cluster.local. udp 68 false 512" NXDOMAIN qr,aa,rd 161 0.000114688s
[INFO] 10.244.0.17:60672 - 51160 "A IN zipkin.istio-system.istio-system.svc.cluster.local. udp 68 false 512" NXDOMAIN qr,aa,rd 161 0.000048799s
[INFO] 10.244.0.19:57939 - 51783 "A IN zipkin.istio-system.svc.cluster.local. udp 55 false 512" NXDOMAIN qr,aa,rd 148 0.000074505s
[INFO] 10.244.0.17:60672 - 15308 "A IN zipkin.istio-system.svc.cluster.local. udp 55 false 512" NXDOMAIN qr,aa,rd 148 0.000031958s
[INFO] 10.244.0.19:57939 - 35065 "A IN zipkin.istio-system.cluster.local. udp 51 false 512" NXDOMAIN qr,aa,rd 144 0.000056401s
[INFO] 10.244.0.17:60672 - 21835 "A IN zipkin.istio-system.cluster.local. udp 51 false 512" NXDOMAIN qr,aa,rd 144 0.000030654s
[INFO] 10.244.0.17:60672 - 63936 "A IN zipkin.istio-system. udp 37 false 512" NXDOMAIN qr,rd,ra 37 0.000840904s
[INFO] 10.244.0.19:57939 - 27760 "A IN zipkin.istio-system. udp 37 false 512" NXDOMAIN qr,rd,ra 37 0.000917413s
[INFO] 10.244.0.19:57886 - 2001 "A IN zipkin.istio-system.istio-system.svc.cluster.local. udp 68 false 512" NXDOMAIN qr,aa,rd 161 0.000102184s
[INFO] 10.244.0.19:57886 - 25280 "A IN zipkin.istio-system.svc.cluster.local. udp 55 false 512" NXDOMAIN qr,aa,rd 148 0.000059515s
[INFO] 10.244.0.17:50330 - 59468 "A IN zipkin.istio-system.istio-system.svc.cluster.local. udp 68 false 512" NXDOMAIN qr,aa,rd 161 0.000043243s
[INFO] 10.244.0.17:50330 - 46131 "A IN zipkin.istio-system.svc.cluster.local. udp 55 false 512" NXDOMAIN qr,aa,rd 148 0.000031159s
[INFO] 10.244.0.19:57886 - 63935 "A IN zipkin.istio-system.cluster.local. udp 51 false 512" NXDOMAIN qr,aa,rd 144 0.000073851s
[INFO] 10.244.0.17:50330 - 23747 "A IN zipkin.istio-system.cluster.local. udp 51 false 512" NXDOMAIN qr,aa,rd 144 0.000030324s
[INFO] 10.244.0.17:50330 - 13503 "A IN zipkin.istio-system. udp 37 false 512" NXDOMAIN qr,rd,ra 37 0.000783496s
[INFO] 10.244.0.19:57886 - 28831 "A IN zipkin.istio-system. udp 37 false 512" NXDOMAIN qr,rd,ra 37 0.000878934s
[INFO] 10.244.0.17:46753 - 60251 "A IN zipkin.istio-system.istio-system.svc.cluster.local. udp 68 false 512" NXDOMAIN qr,aa,rd 161 0.00009785s
[INFO] 10.244.0.19:48336 - 36306 "A IN zipkin.istio-system.istio-system.svc.cluster.local. udp 68 false 512" NXDOMAIN qr,aa,rd 161 0.000088139s
[INFO] 10.244.0.17:46753 - 53306 "A IN zipkin.istio-system.svc.cluster.local. udp 55 false 512" NXDOMAIN qr,aa,rd 148 0.000075589s
[INFO] 10.244.0.19:48336 - 21876 "A IN zipkin.istio-system.svc.cluster.local. udp 55 false 512" NXDOMAIN qr,aa,rd 148 0.000048319s
[INFO] 10.244.0.17:46753 - 16671 "A IN zipkin.istio-system.cluster.local. udp 51 false 512" NXDOMAIN qr,aa,rd 144 0.000050557s
[INFO] 10.244.0.19:48336 - 56758 "A IN zipkin.istio-system.cluster.local. udp 51 false 512" NXDOMAIN qr,aa,rd 144 0.000067075s
[INFO] 10.244.0.17:46753 - 18678 "A IN zipkin.istio-system. udp 37 false 512" NXDOMAIN qr,rd,ra 37 0.001153341s
[INFO] 10.244.0.19:48336 - 43753 "A IN zipkin.istio-system. udp 37 false 512" NXDOMAIN qr,rd,ra 37 0.001175752s
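The NXDOMAIN bursts above are not errors in CoreDNS itself: Istio's proxies keep resolving the short name zipkin.istio-system, Zipkin is not installed, and each pod's resolver walks its search list before giving up. With the standard in-cluster search domains for a pod in istio-system, one lookup fans out as:

zipkin.istio-system.istio-system.svc.cluster.local.  ->  NXDOMAIN
zipkin.istio-system.svc.cluster.local.               ->  NXDOMAIN
zipkin.istio-system.cluster.local.                   ->  NXDOMAIN
zipkin.istio-system.                                 ->  NXDOMAIN (forwarded upstream)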
==> coredns [84928e1aee7c] <==
[INFO] 10.244.0.9:32852 - 25042 "A IN zipkin.istio-system.cluster.local. udp 51 false 512" NXDOMAIN qr,aa,rd 144 0.000035747s
[INFO] 10.244.0.9:32852 - 49228 "A IN zipkin.istio-system. udp 37 false 512" NXDOMAIN qr,aa,rd,ra 112 0.000024346s
[INFO] 10.244.0.11:58562 - 40114 "A IN zipkin.istio-system.istio-system.svc.cluster.local. udp 68 false 512" NXDOMAIN qr,aa,rd 161 0.00010194s
[INFO] 10.244.0.11:58562 - 49242 "A IN zipkin.istio-system.svc.cluster.local. udp 55 false 512" NXDOMAIN qr,aa,rd 148 0.000073121s
[INFO] 10.244.0.11:58562 - 64654 "A IN zipkin.istio-system.cluster.local. udp 51 false 512" NXDOMAIN qr,aa,rd 144 0.000054712s
[INFO] 10.244.0.11:58562 - 31965 "A IN zipkin.istio-system. udp 37 false 512" NXDOMAIN qr,aa,rd,ra 112 0.000026179s
[INFO] 10.244.0.9:58780 - 33667 "A IN zipkin.istio-system.istio-system.svc.cluster.local. udp 68 false 512" NXDOMAIN qr,aa,rd 161 0.000061862s
[INFO] 10.244.0.9:58780 - 19339 "A IN zipkin.istio-system.svc.cluster.local. udp 55 false 512" NXDOMAIN qr,aa,rd 148 0.000041725s
[INFO] 10.244.0.9:58780 - 26502 "A IN zipkin.istio-system.cluster.local. udp 51 false 512" NXDOMAIN qr,aa,rd 144 0.000033484s