Installing the Latest Kubernetes v1.23.5


1. Install Docker on both machines

// 1. Add the Docker yum repo
yum install -y wget && wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo

// 2. Install Docker
yum -y install docker-ce-18.06.1.ce-3.el7

// 3. Enable on boot and start
systemctl enable docker && systemctl start docker

// 4. Check the version
docker --version

2. Install the latest Kubernetes

// Find the latest stable version
[root@master ~]# curl -sSL https://dl.k8s.io/release/stable.txt
v1.23.5
// Download and unpack
[root@master tmp]# wget -q https://dl.k8s.io/v1.23.5/kubernetes-server-linux-amd64.tar.gz

[root@master tmp]# tar -zxf kubernetes-server-linux-amd64.tar.gz
[root@master tmp]# ls kubernetes
addons  kubernetes-src.tar.gz  LICENSES  server
[root@master tmp]# ls kubernetes/server/bin/ | grep -E 'kubeadm|kubelet|kubectl'
kubeadm
kubectl
kubelet

// As you can see, the server/bin/ directory contains everything we need. Move kubeadm, kubectl, and kubelet into /usr/bin.
[root@master tmp]# mv kubernetes/server/bin/kube{adm,ctl,let} /usr/bin/
[root@master tmp]# ls /usr/bin/kube*
/usr/bin/kubeadm  /usr/bin/kubectl  /usr/bin/kubelet
[root@master tmp]# kubeadm version
[root@master tmp]# kubectl version --client
[root@master tmp]# kubelet --version

// To keep the components running stably in production, and to make them easier to manage, we add systemd configuration for kubelet so that systemd manages the service.

[root@master tmp]# cat <<'EOF' > /etc/systemd/system/kubelet.service
[Unit]
Description=kubelet: The Kubernetes Agent
Documentation=http://kubernetes.io/docs/

[Service]
ExecStart=/usr/bin/kubelet
Restart=always
StartLimitInterval=0
RestartSec=10

[Install]
WantedBy=multi-user.target
EOF
[root@master tmp]# mkdir -p /etc/systemd/system/kubelet.service.d
[root@master tmp]# cat <<'EOF' > /etc/systemd/system/kubelet.service.d/kubeadm.conf
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf"
Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml"
EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env
EnvironmentFile=-/etc/default/kubelet
ExecStart=
ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS
EOF
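// Since we just wrote new unit files, systemd should reload its configuration before the service is enabled (an added step, not in the original transcript):
systemctl daemon-reload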
// Enable start on boot
[root@master tmp]# systemctl enable kubelet
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /etc/systemd/system/kubelet.service.

// At this point the groundwork is essentially done and we could use kubeadm to create the cluster. Not so fast, though: before that, we still need to install two more tools, crictl and socat.

// Kubernetes v1.23.5 pairs with crictl v1.23.0
[root@master ~]# wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.23.0/crictl-v1.23.0-linux-amd64.tar.gz

[root@master ~]# tar zxvf crictl-v1.23.0-linux-amd64.tar.gz
[root@master ~]# mv crictl /usr/bin/
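// crictl talks to the container runtime over a CRI socket it reads from /etc/crictl.yaml. Since this v1.23 setup runs on Docker, the kubelet still uses dockershim, so a minimal config would look like the following (an assumption about this environment; adjust the socket path if yours differs):
cat <<'EOF' > /etc/crictl.yaml
runtime-endpoint: unix:///var/run/dockershim.sock
EOF
crictl --version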

sudo yum install -y socat
// Initialize the master
[root@master ~]# kubeadm init --image-repository registry.aliyuncs.com/google_containers --pod-network-cidr=10.244.0.0/16
[init] Using Kubernetes version: v1.23.5
[preflight] Running pre-flight checks
error execution phase preflight: [preflight] Some fatal errors occurred:
        [ERROR FileExisting-conntrack]: conntrack not found in system path
[preflight] If you know what you are doing, you can make a check non-fatal with --ignore-preflight-errors=...
To see the stack trace of this error execute with --v=5 or higher

// It errored: conntrack-tools needs to be installed
yum -y install socat conntrack-tools
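// Quick check that conntrack is now on the PATH (an added check; the binary ships with conntrack-tools):
conntrack --version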

// Another error
[kubelet-check] Initial timeout of 40s passed.

[kubelet-check] It seems like the kubelet isn't running or healthy.

[kubelet-check] The HTTP call equal to 'curl -sSL http://localhost:10248/healthz' failed with error: Get "http://localhost:10248/healthz": dial tcp [::1]:10248: connect: connection refused.
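// When the kubelet health check fails like this, the kubelet journal usually shows the root cause (a debugging step, not in the original transcript); in this case it leads to the cgroup driver mismatch described next:
journalctl -u kubelet --no-pager | tail -n 30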

// Docker was installed via yum, which leaves its cgroup driver at the default, cgroupfs, while kubelet defaults to the systemd driver. The mismatch keeps kubelet from starting, so change Docker's cgroup driver to systemd by adding the following:
vim /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"]
}

// Restart Docker
systemctl restart docker
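// Quick check that the change took effect (expect "systemd"):
docker info --format '{{.CgroupDriver}}'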
// Re-initialize with kubeadm
kubeadm reset  # reset first

kubeadm init \
--apiserver-advertise-address=192.168.42.122  \
--image-repository registry.aliyuncs.com/google_containers \
--kubernetes-version v1.23.5 \
--service-cidr=10.96.0.0/12 \
--pod-network-cidr=10.244.0.0/16 \
--ignore-preflight-errors=all

kubeadm reset

// A simpler init works too:
kubeadm init --image-repository registry.aliyuncs.com/google_containers --pod-network-cidr=10.244.0.0/16

Your Kubernetes control-plane has initialized successfully!

/var/lib/kubelet/config.yaml   # kubelet config file generated by kubeadm
/etc/kubernetes/pki            # certificate directory
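// Side note: kubeadm also records its cluster-wide settings in a ConfigMap (standard kubeadm behavior), which can be inspected at any time:
kubectl -n kube-system get cm kubeadm-config -o yaml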

[root@master ~]# kubeadm config images list --kubernetes-version v1.23.5
k8s.gcr.io/kube-apiserver:v1.23.5
k8s.gcr.io/kube-controller-manager:v1.23.5
k8s.gcr.io/kube-scheduler:v1.23.5
k8s.gcr.io/kube-proxy:v1.23.5
k8s.gcr.io/pause:3.6
k8s.gcr.io/etcd:3.5.1-0
k8s.gcr.io/coredns/coredns:v1.8.6
[root@master ~]# kubeadm config images pull --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.23.5
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-apiserver:v1.23.5
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-controller-manager:v1.23.5
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-scheduler:v1.23.5
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-proxy:v1.23.5
[config/images] Pulled registry.aliyuncs.com/google_containers/pause:3.6
[config/images] Pulled registry.aliyuncs.com/google_containers/etcd:3.5.1-0
[config/images] Pulled registry.aliyuncs.com/google_containers/coredns:v1.8.6
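// A quick way to confirm the mirror images landed in the local Docker cache (not in the original transcript):
docker images | grep registry.aliyuncs.com/google_containers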

// Configure the kubeconfig environment. It has to be set up again after every reboot — see the note after the export line below.
  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:
  export KUBECONFIG=/etc/kubernetes/admin.conf
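// The export above only lasts for the current shell session, which is why it has to be redone after every reboot. One way to make it persistent for root (a suggestion, not from the original) is to append it to the shell profile:
echo 'export KUBECONFIG=/etc/kubernetes/admin.conf' >> ~/.bash_profile
source ~/.bash_profile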

You should now deploy a pod network to the cluster.

Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

// Install a pod network add-on: flannel or calico

mkdir ~/kubernetes-flannel && cd ~/kubernetes-flannel

wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

kubectl apply -f kube-flannel.yml

kubectl get nodes

[root@master ~]# kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
Warning: policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+
podsecuritypolicy.policy/psp.flannel.unprivileged created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds created
[root@master ~]# kubectl get pod -n kube-system
NAME                             READY   STATUS    RESTARTS   AGE
coredns-6d8c4cb4d-7jfb8          0/1     Pending   0          11m
coredns-6d8c4cb4d-m8hfd          0/1     Pending   0          11m
etcd-master                      1/1     Running   4          11m
kube-apiserver-master            1/1     Running   3          11m
kube-controller-manager-master   1/1     Running   4          11m
kube-flannel-ds-m65q6            1/1     Running   0          17s
kube-proxy-qlrmp                 1/1     Running   0          11m
kube-scheduler-master            1/1     Running   4          11m
// coredns stayed Pending and I couldn't find the reason,
// so I decided to switch to calico and give that a try.
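// Before switching CNI plugins, the usual way to find out why a pod is stuck Pending is to look at its events and at the node taints (a debugging sketch; coredns pods carry the k8s-app=kube-dns label):
kubectl -n kube-system describe pod -l k8s-app=kube-dns | grep -A5 Events
kubectl describe node master | grep -i taints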

First, delete kube-flannel

[root@master ~]# kubectl delete -f kube-flannel.yml
Warning: policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+
podsecuritypolicy.policy "psp.flannel.unprivileged" deleted
clusterrole.rbac.authorization.k8s.io "flannel" deleted
clusterrolebinding.rbac.authorization.k8s.io "flannel" deleted
serviceaccount "flannel" deleted
configmap "kube-flannel-cfg" deleted
daemonset.apps "kube-flannel-ds" deleted

[root@master ~]# ifconfig cni0 down
cni0: ERROR while getting interface flags: No such device
[root@master ~]# ip link delete cni0
Cannot find device "cni0"
[root@master ~]# rm -rf /var/lib/cni/
[root@master ~]# ifconfig flannel.1 down
[root@master ~]# ip link delete flannel.1
[root@master ~]# rm -f /etc/cni/net.d/*
[root@master ~]# restart kubelet
-bash: restart: command not found
[root@master ~]# systemctl restart kubelet
// Install calico
[root@master ~]# curl https://projectcalico.docs.tigera.io/manifests/calico.yaml -O
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100  212k  100  212k    0     0  68018      0  0:00:03  0:00:03 --:--:-- 68039
[root@master ~]# ls
calico.yaml  kube-flannel.yml  kubernetes-flannel
[root@master ~]# kubectl get nodes
NAME     STATUS     ROLES                  AGE   VERSION
master   NotReady   control-plane,master   16h   v1.23.5
node1    NotReady   <none>                 12h   v1.23.5
[root@master ~]# kubectl apply -f calico.yaml
configmap/calico-config created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/caliconodestatuses.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipreservations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/kubecontrollersconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
daemonset.apps/calico-node created
serviceaccount/calico-node created
deployment.apps/calico-kube-controllers created
serviceaccount/calico-kube-controllers created
Warning: policy/v1beta1 PodDisruptionBudget is deprecated in v1.21+, unavailable in v1.25+; use policy/v1 PodDisruptionBudget
poddisruptionbudget.policy/calico-kube-controllers created
// List the pods
[root@master ~]# kubectl get -w pod -A
NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE
kube-system   calico-kube-controllers-56fcbf9d6b-28w9g   1/1     Running   0          21m
kube-system   calico-node-btgnl                          1/1     Running   0          21m
kube-system   calico-node-z64mb                          1/1     Running   0          21m
kube-system   coredns-6d8c4cb4d-8pnxx                    1/1     Running   0          12h
kube-system   coredns-6d8c4cb4d-jdbj2                    1/1     Running   0          12h
kube-system   etcd-master                                1/1     Running   4          17h
kube-system   kube-apiserver-master                      1/1     Running   3          17h
kube-system   kube-controller-manager-master             1/1     Running   4          17h
kube-system   kube-proxy-68qrn                           1/1     Running   0          12h
kube-system   kube-proxy-qlrmp                           1/1     Running   0          17h
kube-system   kube-scheduler-master                      1/1     Running   4          17h
Everything is running normally now.
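// A final sanity check (commands only; both nodes should report Ready once the calico pods settle):
kubectl get nodes
kubectl get pods -A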

Source: https://blog.csdn.net/qq_36002737/article/details/123678418

Original: https://www.cnblogs.com/helong-123/p/16206763.html
Author: 萌褚
Title: Installing the Latest Kubernetes v1.23.5
