
K8S | 2. Cluster (v1.15.1) Installation

Basic System Configuration

Host and IP Address Plan

Role     Hostname     IP Address
master   k8s-master   192.168.111.201/24
node     k8s-node1    192.168.111.202/24
node     k8s-node2    192.168.111.203/24

Set Hostnames and the hosts File

hostnamectl set-hostname k8s-master
hostnamectl set-hostname k8s-node1
hostnamectl set-hostname k8s-node2
vim /etc/hosts

192.168.111.201 k8s-master
192.168.111.202 k8s-node1
192.168.111.203 k8s-node2
scp /etc/hosts root@k8s-node1:/etc/
scp /etc/hosts root@k8s-node2:/etc/
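
An optional sanity check from the master to confirm every host resolves the others by name after the hosts file has been distributed:

for h in k8s-master k8s-node1 k8s-node2; do
  ping -c 1 "$h" >/dev/null && echo "$h resolves OK"
done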

Install Dependency Packages

yum -y install conntrack ntpdate ntp ipvsadm ipset jq iptables curl sysstat libseccomp wget vim net-tools git bash-completion

Configure the Firewall, Swap, and SELinux

systemctl stop firewalld && systemctl disable firewalld
yum -y install iptables-services && systemctl start iptables && systemctl enable iptables && iptables -F && service iptables save
swapoff -a && sed -i  '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
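
To confirm swap and SELinux are really off (setenforce 0 only switches to permissive for the current boot; the config change takes full effect after a reboot):

free -m | grep -i swap   # the Swap line should show 0 total
getenforce               # expect Permissive now, Disabled after the next reboot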

Tune Kernel Parameters

cat > /etc/sysctl.d/kubernetes.conf <<EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
# do not use swap space; allow it only when the system is out of memory
vm.swappiness=0
# do not check whether enough physical memory is available
vm.overcommit_memory=1
# do not panic on OOM; let the OOM killer handle it
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF

sysctl -p /etc/sysctl.d/kubernetes.conf
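
If sysctl -p complains that net.bridge.bridge-nf-call-iptables is an unknown key, the br_netfilter module is simply not loaded yet (it is loaded explicitly in the IPVS prerequisites step below). A quick check that the important settings took effect:

modprobe br_netfilter
sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward vm.swappiness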

Set the System Time Zone

# Set the system time zone to Asia/Shanghai
timedatectl set-timezone Asia/Shanghai
# Keep the hardware clock (RTC) in UTC
timedatectl set-local-rtc 0
# Restart the services that depend on the system time
systemctl restart rsyslog
systemctl restart crond
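
Verify the result:

timedatectl | grep "Time zone"   # expect Asia/Shanghai
date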

Disable Unneeded System Services

systemctl stop postfix && systemctl disable postfix

Configure rsyslogd and systemd-journald

mkdir /var/log/journal # directory for persistent log storage
mkdir /etc/systemd/journald.conf.d
cat > /etc/systemd/journald.conf.d/99-prophet.conf << EOF
[Journal]
# Persist logs to disk
Storage=persistent

# Compress archived logs
Compress=yes

SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000

# Maximum total disk usage: 10G
SystemMaxUse=10G

# Maximum size of a single journal file: 200M
SystemMaxFileSize=200M

# Keep logs for 2 weeks
MaxRetentionSec=2week

# Do not forward logs to syslog
ForwardToSyslog=no
EOF

# Restart the journald service
systemctl restart systemd-journald
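
A quick check that the journal is now persisted under /var/log/journal and stays within the configured limits:

journalctl --disk-usage
ls /var/log/journal/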

Upgrade the System Kernel to 5.4

The stock 3.10.x kernel that ships with CentOS 7.x has known bugs that make Docker and Kubernetes unstable, so we upgrade the kernel first.

rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
# After installation, check that the new kernel's menuentry in /boot/grub2/grub.cfg contains an initrd16 line; if it does not, run the install again
yum --enablerepo=elrepo-kernel install -y kernel-lt
# Set the new kernel as the default boot entry
grub2-set-default "CentOS Linux (5.4.225-1.el7.elrepo.x86_64) 7 (Core)"
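
The exact menuentry string varies with the kernel build that elrepo ships, so it is safer to list the installed entries first and copy the new 5.4 entry verbatim (a small sketch; the awk split assumes the default grub.cfg quoting):

awk -F\' '/^menuentry / {print $2}' /boot/grub2/grub.cfg
grub2-editenv list   # confirm saved_entry points at the 5.4 kernel after grub2-set-default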

Install the K8S Cluster

Prerequisites for Enabling IPVS in kube-proxy

modprobe br_netfilter

cat > /etc/sysconfig/modules/ipvs.modules << EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF

chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack

Note: on kernels 4.19 and later (including the 5.4 kernel installed above), nf_conntrack_ipv4 has been merged into nf_conntrack, so that is the module to load and check for.

Install Docker

yum -y install yum-utils device-mapper-persistent-data lvm2

yum-config-manager \
--add-repo \
http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

yum update -y && yum install -y docker-ce


# Set the new kernel as the default boot entry again and reboot (yum update resets the default back to the 3.10 kernel)
grub2-set-default "CentOS Linux (5.4.225-1.el7.elrepo.x86_64) 7 (Core)"
reboot

# Confirm the kernel version
uname -r
5.4.225-1.el7.elrepo.x86_64

# Create the /etc/docker directory
mkdir /etc/docker

# Configure the Docker daemon

cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  }
}
EOF

mkdir -p /etc/systemd/system/docker.service.d

# Reload and restart the Docker service
systemctl daemon-reload && systemctl restart docker && systemctl enable docker
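
Because the cgroup-driver mismatch in FAQ Q2 below is a common failure, confirm Docker picked up the systemd driver before moving on:

docker info 2>/dev/null | grep -i "cgroup driver"   # expect: Cgroup Driver: systemd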

Install kubeadm (on both master and nodes)

cat <<EOF > /etc/yum.repos.d/kubernetes.repo 
[Kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
EOF

yum -y install kubeadm-1.15.1 kubectl-1.15.1 kubelet-1.15.1
systemctl enable kubelet.service
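
Confirm the pinned versions landed on every node:

kubeadm version -o short
kubectl version --client --short
rpm -q kubelet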

Download the Images

K8S installation package:
https://www.aliyundrive.com/s/ukkPSaMDeG8
Extraction code: 7n6j

Upload the downloaded archive to /usr/local/src and extract it.

[root@k8s-master src]# pwd
/usr/local/src
[root@k8s-master src]# tree -L 2
.
├── docker-compose
├── harbor-offline-installer-v1.2.0.tgz
├── kubeadm-basic.images
│   ├── apiserver.tar
│   ├── coredns.tar
│   ├── etcd.tar
│   ├── kubec-con-man.tar
│   ├── pause.tar
│   ├── proxy.tar
│   └── scheduler.tar
├── kubeadm-basic.images.tar.gz
└── \350\275\257\350\267\257\347\224\261.7z

1 directory, 11 files
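
The offline image archives still have to be imported into Docker on the master and on every node; a minimal sketch, assuming the .tar files sit under /usr/local/src/kubeadm-basic.images as listed above:

cd /usr/local/src
tar -zxvf kubeadm-basic.images.tar.gz   # skip if already extracted
for img in kubeadm-basic.images/*.tar; do
  docker load -i "$img"
done
docker images   # the apiserver, controller-manager, scheduler, proxy, etcd, coredns and pause images should now be listed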

Generate the Initialization Configuration File

# Generate the default init configuration file
kubeadm config print init-defaults > kubeadm-config.yaml

# Modify the following parts of the file
localAPIEndpoint:
  advertiseAddress: 192.168.111.201   # master node IP
kubernetesVersion: v1.15.0            # version number
networking:
  podSubnet: "10.244.0.0/16"          # add the pod network CIDR
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1   # switch kube-proxy to IPVS mode
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs
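
Before running init, the images this configuration expects can be listed (without pulling) and cross-checked against what was loaded from the offline archives:

kubeadm config images list --config kubeadm-config.yaml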

The full configuration file content is as follows (note: this sample was printed by a newer kubeadm, hence apiVersion v1beta3 and kubernetesVersion 1.25.0; for the 1.15.1 install keep the values set above).

apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.111.201
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  name: k8s-master
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.k8s.io
kind: ClusterConfiguration
kubernetesVersion: 1.25.0
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs

Run the Initialization

kubeadm init --config=kubeadm-config.yaml --upload-certs |tee kubeadm-init.log

[init] Using Kubernetes version: v1.15.1
[preflight] Running pre-flight checks
[WARNING SystemVerification]: this Docker version is not on the list of validated versions: 20.10.21. Latest validated version: 18.09
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Activating the kubelet service
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.111.201]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-master localhost] and IPs [192.168.111.201 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-master localhost] and IPs [192.168.111.201 127.0.0.1 ::1]
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 38.004014 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.15" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
c7d4324264d6989efe538e2a699a415b7024a020e07a7925b37decc4cb16b92d
[mark-control-plane] Marking the node k8s-master as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node k8s-master as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: abcdef.0123456789abcdef
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[kubelet-check] Initial timeout of 40s passed.
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.111.201:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:9c0b031b3653733b7acadbaed38cc3c5faa8e582a48c92eb387ff416c2850d7c
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
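
Since bash-completion was installed with the dependency packages, kubectl command completion can also be enabled for convenience:

echo 'source <(kubectl completion bash)' >> ~/.bashrc
source ~/.bashrc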
[root@k8s-master flannel]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master NotReady master 38m v1.15.1

If init fails, adjust the relevant configuration according to the error log, then reset the node with the following command before trying again:

kubeadm reset
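
kubeadm reset does not clean up everything; a typical follow-up before re-running init (a sketch, adjust to your environment):

kubeadm reset -f
rm -rf $HOME/.kube /etc/cni/net.d
iptables -F && iptables -t nat -F && iptables -X
ipvsadm --clear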

Deploy the Pod Network

[root@k8s-master flannel]# kubectl apply -f kube-flannel.yml

namespace/kube-flannel created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds created

[root@k8s-master flannel]# kubectl get po -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-5c98db65d4-7ndgf 0/1 Pending 0 39m
coredns-5c98db65d4-jmgkk 0/1 Pending 0 39m
etcd-k8s-master 1/1 Running 0 38m
kube-apiserver-k8s-master 1/1 Running 0 38m
kube-controller-manager-k8s-master 1/1 Running 0 38m
kube-proxy-fqjgn 1/1 Running 0 39m
kube-scheduler-k8s-master 1/1 Running 0 38m
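
The two CoreDNS pods stay Pending until the flannel DaemonSet pods are up; once the network addon is running they should move to Running. A quick way to watch the transition:

kubectl get pods --all-namespaces -o wide | grep -E 'flannel|coredns'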

The kube-flannel.yml file content is as follows

---
kind: Namespace
apiVersion: v1
metadata:
  name: kube-flannel
  labels:
    pod-security.kubernetes.io/enforce: privileged
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-flannel
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-flannel
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-flannel
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-flannel
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni-plugin
        #image: flannelcni/flannel-cni-plugin:v1.1.0 for ppc64le and mips64le (dockerhub limitations may apply)
        image: docker.io/rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.0
        command:
        - cp
        args:
        - -f
        - /flannel
        - /opt/cni/bin/flannel
        volumeMounts:
        - name: cni-plugin
          mountPath: /opt/cni/bin
      - name: install-cni
        #image: flannelcni/flannel:v0.20.1 for ppc64le and mips64le (dockerhub limitations may apply)
        image: docker.io/rancher/mirrored-flannelcni-flannel:v0.20.1
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        #image: flannelcni/flannel:v0.20.1 for ppc64le and mips64le (dockerhub limitations may apply)
        image: docker.io/rancher/mirrored-flannelcni-flannel:v0.20.1
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: EVENT_QUEUE_DEPTH
          value: "5000"
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
        - name: xtables-lock
          mountPath: /run/xtables.lock
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni-plugin
        hostPath:
          path: /opt/cni/bin
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
      - name: xtables-lock
        hostPath:
          path: /run/xtables.lock
          type: FileOrCreate

Join the Worker Nodes to the Cluster

# The join command can be regenerated with:
kubeadm token create --print-join-command

kubeadm join 192.168.111.201:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:9c0b031b3653733b7acadbaed38cc3c5faa8e582a48c92eb387ff416c2850d7c


[root@k8s-master ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master Ready master 49m v1.15.1
k8s-node1 Ready <none> 42m v1.15.1
k8s-node2 Ready <none> 42m v1.15.1

[root@k8s-master flannel]# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-5c98db65d4-fslj5 0/1 ContainerCreating 0 50m
coredns-5c98db65d4-vrgnq 0/1 ContainerCreating 0 50m
etcd-k8s-master 1/1 Running 0 49m
kube-apiserver-k8s-master 1/1 Running 0 49m
kube-controller-manager-k8s-master 1/1 Running 0 49m
kube-flannel-ds-amd64-scwhp 1/1 Running 0 7m41s
kube-flannel-ds-amd64-vxzsr 1/1 Running 0 7m41s
kube-flannel-ds-amd64-xvps6 1/1 Running 0 7m41s
kube-proxy-7g7z7 1/1 Running 0 44m
kube-proxy-gchtw 1/1 Running 0 44m
kube-proxy-mdxbr 1/1 Running 0 50m
kube-scheduler-k8s-master 1/1 Running 0 49m
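
An optional smoke test to confirm scheduling and pod networking across the new workers (a sketch using the public nginx image):

kubectl create deployment nginx --image=nginx
kubectl scale deployment nginx --replicas=2
kubectl get pods -o wide          # pods should land on k8s-node1/k8s-node2 with 10.244.x.x addresses
kubectl delete deployment nginx   # clean up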

FAQ

Q1:
[root@k8s-master src]# kubeadm init --config=kubeadm-config.yaml --upload-certs |tee kubeadm-init.log
W1127 22:06:01.251107 2430 initconfiguration.go:305] unknown configuration schema.GroupVersionKind{Group:"kubeporxy.config.k8s.io", Version:"v1alpha1", Kind:"KubeProxyConfiguration"}
W1127 22:06:01.251311 2430 initconfiguration.go:331] [config] WARNING: Ignored YAML document with GroupVersionKind kubeporxy.config.k8s.io/v1alpha1, Kind=KubeProxyConfiguration
[init] Using Kubernetes version: v1.25.0
[preflight] Running pre-flight checks
error execution phase preflight: [preflight] Some fatal errors occurred:
[ERROR CRI]: container runtime is not running: output: E1127 22:06:01.290159 2440 remote_runtime.go:948] "Status from runtime service failed" err="rpc error: code = Unimplemented desc = unknown service runtime.v1alpha2.RuntimeService"
time="2022-11-27T22:06:01+08:00" level=fatal msg="getting status of runtime: rpc error: code = Unimplemented desc = unknown service runtime.v1alpha2.RuntimeService"
, error: exit status 1
[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
To see the stack trace of this error execute with --v=5 or higher
A1:
# The containerd package ships a config.toml with the CRI plugin disabled; removing it and restarting containerd re-enables the CRI runtime service.
rm -rf /etc/containerd/config.toml
systemctl restart containerd



Q2:
Nov 27 22:03:14 k8s-master kubelet: F1127 22:03:14.712519 41018 server.go:273] failed to run Kubelet: failed to create kubelet: misconfiguration: kubelet cgroup driver: "systemd" is different from docker cgroup driver: "cgroupfs"
A2:
Kubernetes officially recommends systemd as the cgroup driver for Docker and other runtimes; keep the setting consistent between the two files.
Edit the Docker config file /etc/docker/daemon.json:
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  }
}
Edit the kubelet config file /var/lib/kubelet/config.yaml:
cgroupDriver: systemd

systemctl daemon-reload
systemctl restart docker
systemctl restart kubelet


Q3:
Nov 27 23:32:37 k8s-master kubelet[107375]: E1127 23:32:37.687073 107375 pod_workers.go:190] Error syncing pod ecae9d12d3610192347be3d1aa5aa552 ("kube-scheduler-k8s-master_kube-system(ecae9d12d3610192347be3d1aa5aa552)"), skipping: failed to "CreatePodSandbox" for "kube-scheduler-k8s-master_kube-system(ecae9d12d3610192347be3d1aa5aa552)" with CreatePodSandboxError: "CreatePodSandbox for pod \"kube-scheduler-k8s-master_kube-system(ecae9d12d3610192347be3d1aa5aa552)\" failed: rpc error: code = Unknown desc = failed to create a sandbox for pod \"kube-scheduler-k8s-master\": Error response from daemon: Conflict. The container name \"/k8s_POD_kube-scheduler-k8s-master_kube-system_ecae9d12d3610192347be3d1aa5aa552_3\" is already in use by container \"7dcb605b10a59a09aa084a73dcf1d5ab68cc9a27ad6a89aa616ac0e6e42c540e\". You have to remove (or rename) that container to be able to reuse that name."
Nov 27 23:32:37 k8s-master kubelet[107375]: E1127 23:32:37.741283 107375 kubelet.go:2248] node "k8s-master" not found
Nov 27 23:32:37 k8s-master kubelet[107375]: E1127 23:32:37.841936 107375 kubelet.go:2248] node "k8s-master" not found

A3:
[root@k8s-master ~]# cat /var/lib/kubelet/cpu_manager_state
{"policyName":"none","defaultCpuSet":"","checksum":3242152201}
[root@k8s-master ~]# rm -f /var/lib/kubelet/cpu_manager_state
[root@k8s-master ~]# systemctl restart kubelet