K8s Binary Installation and Usage
1. IP Address Plan
Role                                        IP address       Components
k8s-master01 / etcd cluster node 1          192.168.80.20    kube-apiserver, kube-controller-manager, kube-scheduler, etcd
k8s-master02                                192.168.80.25    kube-apiserver, kube-controller-manager, kube-scheduler
k8s-node01 / etcd cluster node 2            192.168.80.30    kubelet, kube-proxy, docker, etcd
k8s-node02 / etcd cluster node 3            192.168.80.35    kubelet, kube-proxy, docker, etcd
Load balancer nginx+keepalived01 (master)   192.168.80.40    VIP: 192.168.80.100
Load balancer nginx+keepalived02 (backup)   192.168.80.45    VIP: 192.168.80.100
2. Deployment Steps
2.1 Environment Preparation (master01, node01, node02)
#Disable the firewall
systemctl stop firewalld
systemctl disable firewalld
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
#Disable SELinux
setenforce 0
sed -i 's/enforcing/disabled/' /etc/selinux/config
#Disable swap
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab
#Set hostnames according to the plan
hostnamectl set-hostname master01
hostnamectl set-hostname node01
hostnamectl set-hostname node02
#Add hosts entries on the masters
cat >> /etc/hosts << EOF
192.168.80.20 master01
192.168.80.25 master02
192.168.80.30 node01
192.168.80.35 node02
EOF
#Tune kernel parameters
cat > /etc/sysctl.d/k8s.conf << EOF
#Enable bridge mode so bridge traffic is passed to the iptables chains
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
#Disable the IPv6 protocol
net.ipv6.conf.all.disable_ipv6=1
net.ipv4.ip_forward=1
EOF
sysctl --system
#Time synchronization
yum install ntpdate -y
ntpdate time.windows.com
crontab -e
*/30 * * * * ntpdate time.windows.com
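A quick sanity check (a minimal sketch, not part of the original steps) confirms the preparation took effect:
free -m | grep -i swap                        #swap totals should all be 0
getenforce                                    #should print Permissive (Disabled after a reboot)
sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward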
2.2 Deploy the etcd Cluster
//On the master01 node
#Prepare the cfssl certificate-generation tools
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -O /usr/local/bin/cfssl
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -O /usr/local/bin/cfssljson
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 -O /usr/local/bin/cfssl-certinfo
chmod +x /usr/local/bin/cfssl*
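A version check (not in the original) confirms the tools are installed and on the PATH:
cfssl version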
#Generate the etcd certificates
mkdir /opt/k8s
cd /opt/k8s/
#Upload etcd-cert.sh and etcd.sh to the /opt/k8s/ directory
chmod +x etcd-cert.sh etcd.sh
#Create a directory for generating the CA certificate, the etcd server certificate, and their private keys
mkdir /opt/k8s/etcd-cert
mv etcd-cert.sh etcd-cert/
cd /opt/k8s/etcd-cert/
./etcd-cert.sh
ls
ca-config.json ca-csr.json ca.pem server.csr server-key.pem
ca.csr ca-key.pem etcd-cert.sh server-csr.json server.pem
#Upload etcd-v3.4.9-linux-amd64.tar.gz to the /opt/k8s directory and start the etcd service
cd /opt/k8s/
tar zxvf etcd-v3.4.9-linux-amd64.tar.gz
mkdir -p /opt/etcd/{cfg,bin,ssl}
cd /opt/k8s/etcd-v3.4.9-linux-amd64/
mv etcd etcdctl /opt/etcd/bin/
cp /opt/k8s/etcd-cert/*.pem /opt/etcd/ssl/
cd /opt/k8s/
./etcd.sh etcd01 192.168.80.20 etcd02=https://192.168.80.30:2380,etcd03=https://192.168.80.35:2380
ps -ef | grep etcd
scp -r /opt/etcd/ root@192.168.80.30:/opt/
scp -r /opt/etcd/ root@192.168.80.35:/opt/
scp /usr/lib/systemd/system/etcd.service root@192.168.80.30:/usr/lib/systemd/system/
scp /usr/lib/systemd/system/etcd.service root@192.168.80.35:/usr/lib/systemd/system/
//On the node01 node
vim /opt/etcd/cfg/etcd
#[Member]
ETCD_NAME="etcd02" #修改
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.80.30:2380" #修改
ETCD_LISTEN_CLIENT_URLS="https://192.168.80.30:2379" #修改
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.80.30:2380" #修改
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.80.30:2379" #修改
ETCD_INITIAL_CLUSTER="etcd01=https://192.168.80.20:2380,etcd02=https://192.168.80.30:2380,etcd03=https://192.168.80.35:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
systemctl start etcd
systemctl enable etcd
systemctl status etcd
//On the node02 node
vim /opt/etcd/cfg/etcd
#[Member]
ETCD_NAME="etcd03" #修改
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.80.35:2380" #修改
ETCD_LISTEN_CLIENT_URLS="https://192.168.80.35:2379" #修改
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.80.35:2380" #修改
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.80.35:2379" #修改
ETCD_INITIAL_CLUSTER="etcd01=https://192.168.80.20:2380,etcd02=https://192.168.80.30:2380,etcd03=https://192.168.80.35:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
systemctl start etcd
systemctl enable etcd
systemctl status etcd
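As an alternative to hand-editing, a sed sketch like the following can rewrite the config copied from master01; NAME and IP are hypothetical placeholders set per node:
#Run on node01 (use NAME=etcd03 IP=192.168.80.35 on node02)
NAME=etcd02; IP=192.168.80.30
sed -i -e "s#^ETCD_NAME=.*#ETCD_NAME=\"${NAME}\"#" \
       -e "/^ETCD_INITIAL_CLUSTER=/!s#192.168.80.20#${IP}#g" /opt/etcd/cfg/etcd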
#Check the etcd cluster health
ETCDCTL_API=3 /opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.80.20:2379,https://192.168.80.30:2379,https://192.168.80.35:2379" endpoint health --write-out=table
ETCDCTL_API=3 /opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.80.20:2379,https://192.168.80.30:2379,https://192.168.80.35:2379" --write-out=table member list
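As an extra functional check (not in the original), write a key on one member and read it back from another:
ETCDCTL_API=3 /opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.80.20:2379" put /probe ok
ETCDCTL_API=3 /opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.80.30:2379" get /probe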
etcd.sh contents
#!/bin/bash
#example: ./etcd.sh etcd01 192.168.80.20 etcd02=https://192.168.80.30:2380,etcd03=https://192.168.80.35:2380
#Create the etcd configuration file /opt/etcd/cfg/etcd
ETCD_NAME=$1
ETCD_IP=$2
ETCD_CLUSTER=$3
WORK_DIR=/opt/etcd
cat > $WORK_DIR/cfg/etcd <<EOF
#[Member]
ETCD_NAME="${ETCD_NAME}"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://${ETCD_IP}:2380"
ETCD_LISTEN_CLIENT_URLS="https://${ETCD_IP}:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://${ETCD_IP}:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://${ETCD_IP}:2379"
ETCD_INITIAL_CLUSTER="${ETCD_NAME}=https://${ETCD_IP}:2380,${ETCD_CLUSTER}"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF
#Create the etcd.service systemd unit file
cat > /usr/lib/systemd/system/etcd.service <<EOF
......
EOF
etcd-cert.sh contents
#!/bin/bash
#Configure the certificate-signing policy so the CA software knows what kinds of certificates it issues; generate the root certificate used to sign the other components' certificates
cat > ca-config.json <<EOF
......
EOF
cat > ca-csr.json <<EOF
......
EOF
#Generate the CA certificate and private key
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
#cfssl gencert -initca <CSRJSON>: generates a new certificate and private key from the CSR JSON file. Without the pipe, all certificate content is printed straight to the screen.
#Note: the CSR JSON files are referenced by relative paths, so run cfssl from the directory containing them, or specify absolute paths.
#cfssljson turns the JSON certificates produced by cfssl into file-based certificates; -bare names the generated certificate files.
#-----------------------
#Generate the etcd server certificate and private key
cat > server-csr.json <<EOF
......
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server   #the www profile is assumed to be defined in ca-config.json above
2.3 Deploy the Docker Engine
//Deploy the Docker engine on all node machines
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum install -y docker-ce docker-ce-cli containerd.io
systemctl start docker.service
systemctl enable docker.service
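Optionally (not part of the original steps; the mirror URL is a placeholder assumption), /etc/docker/daemon.json can point docker at a registry mirror and cap container log size:
cat > /etc/docker/daemon.json << EOF
{
  "registry-mirrors": ["https://docker.mirrors.example.com"],
  "log-driver": "json-file",
  "log-opts": { "max-size": "100m" }
}
EOF
systemctl restart docker.service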
2.4 Deploy the Master Components
//On the master01 node
#Upload master.zip and k8s-cert.sh to the /opt/k8s directory, then unzip master.zip
#master.zip contains four scripts: apiserver.sh, schedule.sh, admin.sh, controller-manager.sh
cd /opt/k8s/
unzip master.zip
chmod +x *.sh
mkdir -p /opt/kubernetes/{bin,cfg,ssl,logs}
#Create a directory for generating the CA certificate and the components' certificates and private keys
mkdir /opt/k8s/k8s-cert
mv /opt/k8s/k8s-cert.sh /opt/k8s/k8s-cert
cd /opt/k8s/k8s-cert/
./k8s-cert.sh
ls *pem
admin-key.pem apiserver-key.pem ca-key.pem kube-proxy-key.pem
admin.pem apiserver.pem ca.pem kube-proxy.pem
cp ca*pem apiserver*pem /opt/kubernetes/ssl/
#Upload kubernetes-server-linux-amd64.tar.gz to the /opt/k8s/ directory and unpack the kubernetes tarball
cd /opt/k8s/
tar zxvf kubernetes-server-linux-amd64.tar.gz
cd /opt/k8s/kubernetes/server/bin
cp kube-apiserver kubectl kube-controller-manager kube-scheduler /opt/kubernetes/bin/
ln -s /opt/kubernetes/bin/* /usr/local/bin/
#Create the bootstrap token authentication file; the apiserver loads it at startup, which effectively creates that user inside the cluster, and RBAC can then grant it permissions
cd /opt/k8s/
vim token.sh
#!/bin/bash
#Take the first 16 bytes of random data, print them in hexadecimal, and strip the spaces
BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
#Generate the token.csv file in the format: token serial number,user name,UID,user group
cat > /opt/kubernetes/cfg/token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
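Then make the script executable, run it, and check the result (a usage sketch consistent with the steps above):
chmod +x token.sh
./token.sh
cat /opt/kubernetes/cfg/token.csv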
k8s-cert.sh contents
#!/bin/bash
#Configure the certificate-signing policy so the CA software knows what kinds of certificates it issues; generate the root certificate used to sign the other components' certificates
cat > ca-config.json <<EOF
......
EOF
cat > ca-csr.json <<EOF
......
EOF
#Generate the CA certificate and private key
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
#-----------------------
#Generate the apiserver certificate and private key
cat > apiserver-csr.json <<EOF
......
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes apiserver-csr.json | cfssljson -bare apiserver
#-----------------------
#Generate the admin (kubectl) certificate and private key
cat > admin-csr.json <<EOF
......
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
#-----------------------
#Generate the kube-proxy certificate and private key
cat > kube-proxy-csr.json <<EOF
......
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
apiserver.sh contents
#!/bin/bash
#example: apiserver.sh 192.168.80.20 https://192.168.80.20:2379,https://192.168.80.30:2379,https://192.168.80.35:2379
#Create the kube-apiserver startup parameter configuration file
MASTER_ADDRESS=$1
ETCD_SERVERS=$2
cat >/opt/kubernetes/cfg/kube-apiserver <<EOF
KUBE_APISERVER_OPTS="--logtostderr=true \\
--v=4 \\
--etcd-servers=${ETCD_SERVERS} \\
--bind-address=${MASTER_ADDRESS} \\
--secure-port=6443 \\
--advertise-address=${MASTER_ADDRESS} \\
......
EOF
#Create the kube-apiserver.service systemd unit file
cat >/usr/lib/systemd/system/kube-apiserver.service <<EOF
......
EOF
schedule.sh contents
#!/bin/bash
##Create the kube-scheduler startup parameter configuration file
MASTER_ADDRESS=$1
cat >/opt/kubernetes/cfg/kube-scheduler <<EOF
......
EOF
#Generate the kube-scheduler certificate
cat > kube-scheduler-csr.json << EOF
{
"CN": "system:kube-scheduler",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "system:masters",
"OU": "System"
}
]
}
EOF
#Generate the certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler
#Generate the kubeconfig file
KUBE_CONFIG="/opt/kubernetes/cfg/kube-scheduler.kubeconfig"
KUBE_APISERVER="https://192.168.80.20:6443"
kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=${KUBE_CONFIG}
kubectl config set-credentials kube-scheduler \
--client-certificate=./kube-scheduler.pem \
--client-key=./kube-scheduler-key.pem \
--embed-certs=true \
--kubeconfig=${KUBE_CONFIG}
kubectl config set-context default \
--cluster=kubernetes \
--user=kube-scheduler \
--kubeconfig=${KUBE_CONFIG}
kubectl config use-context default --kubeconfig=${KUBE_CONFIG}
##Create the kube-scheduler.service systemd unit file
cat >/usr/lib/systemd/system/kube-scheduler.service <<EOF
......
EOF
controller-manager.sh contents
#!/bin/bash
##Create the kube-controller-manager startup parameter configuration file
MASTER_ADDRESS=$1
cat >/opt/kubernetes/cfg/kube-controller-manager <<EOF
......
EOF
#Generate the kube-controller-manager certificate
cat > kube-controller-manager-csr.json << EOF
{
"CN": "system:kube-controller-manager",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "system:masters",
"OU": "System"
}
]
}
EOF
#Generate the certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
#Generate the kubeconfig file
KUBE_CONFIG="/opt/kubernetes/cfg/kube-controller-manager.kubeconfig"
KUBE_APISERVER="https://192.168.80.20:6443"
kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=${KUBE_CONFIG}
kubectl config set-credentials kube-controller-manager \
--client-certificate=./kube-controller-manager.pem \
--client-key=./kube-controller-manager-key.pem \
--embed-certs=true \
--kubeconfig=${KUBE_CONFIG}
kubectl config set-context default \
--cluster=kubernetes \
--user=kube-controller-manager \
--kubeconfig=${KUBE_CONFIG}
kubectl config use-context default --kubeconfig=${KUBE_CONFIG}
##Create the kube-controller-manager.service systemd unit file
cat >/usr/lib/systemd/system/kube-controller-manager.service <<EOF
......
EOF
admin.sh contents
#!/bin/bash
mkdir /root/.kube
KUBE_CONFIG="/root/.kube/config"
KUBE_APISERVER="https://192.168.80.20:6443"
cd /opt/k8s/k8s-cert/
kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=${KUBE_CONFIG}
kubectl config set-credentials cluster-admin \
--client-certificate=./admin.pem \
--client-key=./admin-key.pem \
--embed-certs=true \
--kubeconfig=${KUBE_CONFIG}
kubectl config set-context default \
--cluster=kubernetes \
--user=cluster-admin \
--kubeconfig=${KUBE_CONFIG}
kubectl config use-context default --kubeconfig=${KUBE_CONFIG}
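Once admin.sh has written /root/.kube/config, a quick check (not shown in the original) confirms kubectl can reach the control plane:
kubectl get cs        #shows scheduler/controller-manager/etcd health (deprecated but still functional in v1.20)
kubectl cluster-info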
2.5 Deploy the Worker Node Components
master02 environment preparation: run all of the steps in section 2.1 on master02 first.
//On all node machines
#Create the kubernetes working directory
mkdir -p /opt/kubernetes/{bin,cfg,ssl,logs}
#Upload node.zip to the /opt directory and unzip it to obtain kubelet.sh and proxy.sh
cd /opt/
unzip node.zip
chmod +x kubelet.sh proxy.sh
//On the master01 node
#Copy kubelet and kube-proxy to the node machines
cd /opt/k8s/kubernetes/server/bin
scp kubelet kube-proxy root@192.168.80.30:/opt/kubernetes/bin/
scp kubelet kube-proxy root@192.168.80.35:/opt/kubernetes/bin/
#Upload kubeconfig.sh to the /opt/k8s/kubeconfig directory and generate the kubeconfig files
mkdir /opt/k8s/kubeconfig
cd /opt/k8s/kubeconfig
chmod +x kubeconfig.sh
./kubeconfig.sh 192.168.80.20 /opt/k8s/k8s-cert/
scp bootstrap.kubeconfig kube-proxy.kubeconfig root@192.168.80.30:/opt/kubernetes/cfg/
scp bootstrap.kubeconfig kube-proxy.kubeconfig root@192.168.80.35:/opt/kubernetes/cfg/
#RBAC authorization so that the kubelet-bootstrap user has permission to submit CSR requests
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
//On the node01/node02 nodes
#Start the kubelet service
cd /opt/
./kubelet.sh 192.168.80.30    #on node01; use 192.168.80.35 on node02
ps aux | grep kubelet
//On the master01 node, approve the CSR requests
#The CSR request from the node01 kubelet appears; Pending means it is waiting for the cluster to sign the node's certificate
kubectl get csr
NAME AGE SIGNERNAME REQUESTOR CONDITION
node-csr-od7WoiYNololFAtl9xbhzB2aE3HWFi2uPWQzwygbmBU 69s kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Pending
node-csr-qq9aKXpHuxbDnG_KTpfbxbAWbuY5Na9813tq_n94_6Y 2m6s kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Pending
#Approve the CSR requests
kubectl certificate approve node-csr-od7WoiYNololFAtl9xbhzB2aE3HWFi2uPWQzwygbmBU
kubectl certificate approve node-csr-qq9aKXpHuxbDnG_KTpfbxbAWbuY5Na9813tq_n94_6Y
#Approved,Issued means the CSR request has been authorized and the certificate issued
kubectl get csr
NAME AGE SIGNERNAME REQUESTOR CONDITION
node-csr-od7WoiYNololFAtl9xbhzB2aE3HWFi2uPWQzwygbmBU 3m49s kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Approved,Issued
node-csr-qq9aKXpHuxbDnG_KTpfbxbAWbuY5Na9813tq_n94_6Y 4m46s kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Approved,Issued
#Check the nodes; they show NotReady because the network plugin has not been deployed yet
kubectl get node
NAME STATUS ROLES AGE VERSION
192.168.80.30 NotReady <none> 69s v1.20.11
192.168.80.35 NotReady <none> 77s v1.20.11
//On the node01/node02 nodes
#Load the ip_vs modules
for i in $(ls /usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs|grep -o "^[^.]*");do echo $i; /sbin/modinfo -F filename $i >/dev/null 2>&1 && /sbin/modprobe $i;done
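To confirm the modules actually loaded (a simple check, not in the original):
lsmod | grep ip_vs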
#Start the kube-proxy service
cd /opt/
./proxy.sh 192.168.80.30    #on node01; use 192.168.80.35 on node02
ps aux | grep kube-proxy
kubeconfig.sh contents
#!/bin/bash
#example: kubeconfig.sh 192.168.80.20 /opt/k8s/k8s-cert/
#Create the bootstrap.kubeconfig file
#This file embeds the user token from token.csv and the apiserver CA certificate; the kubelet loads it on first start, uses the CA certificate to establish TLS communication with the apiserver, and presents the embedded token as its identity when sending the CSR request to the apiserver
BOOTSTRAP_TOKEN=$(awk -F ',' '{print $1}' /opt/kubernetes/cfg/token.csv)
APISERVER=$1
SSL_DIR=$2
export KUBE_APISERVER="https://$APISERVER:6443"
#Set cluster parameters
kubectl config set-cluster kubernetes \
--certificate-authority=$SSL_DIR/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=bootstrap.kubeconfig
#--embed-certs=true: embed the ca.pem certificate into the generated bootstrap.kubeconfig file
#Set client authentication parameters; the kubelet authenticates with the bootstrap token
kubectl config set-credentials kubelet-bootstrap \
--token=${BOOTSTRAP_TOKEN} \
--kubeconfig=bootstrap.kubeconfig
#Set context parameters
kubectl config set-context default \
--cluster=kubernetes \
--user=kubelet-bootstrap \
--kubeconfig=bootstrap.kubeconfig
#Use the context to produce the bootstrap.kubeconfig file
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
#----------------------
#Create the kube-proxy.kubeconfig file
#Set cluster parameters
kubectl config set-cluster kubernetes \
--certificate-authority=$SSL_DIR/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=kube-proxy.kubeconfig
#Set client authentication parameters; kube-proxy authenticates with its TLS certificate
kubectl config set-credentials kube-proxy \
--client-certificate=$SSL_DIR/kube-proxy.pem \
--client-key=$SSL_DIR/kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=kube-proxy.kubeconfig
#Set context parameters
kubectl config set-context default \
--cluster=kubernetes \
--user=kube-proxy \
--kubeconfig=kube-proxy.kubeconfig
#Use the context to produce the kube-proxy.kubeconfig file
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
kubelet.sh contents
#!/bin/bash
NODE_ADDRESS=$1
DNS_SERVER_IP=${2:-"10.0.0.2"}
#Create the kubelet startup parameter configuration file
cat >/opt/kubernetes/cfg/kubelet <<EOF
......
EOF
#Create the kubelet config file
cat >/opt/kubernetes/cfg/kubelet.config <<EOF
......
EOF
#Create the kubelet.service systemd unit file
cat >/usr/lib/systemd/system/kubelet.service <<EOF
......
EOF
proxy.sh contents
#!/bin/bash
NODE_ADDRESS=$1
#Create the kube-proxy startup parameter configuration file
cat >/opt/kubernetes/cfg/kube-proxy <<EOF
......
EOF
#Create the kube-proxy.service systemd unit file
cat >/usr/lib/systemd/system/kube-proxy.service <<EOF
......
EOF
2.6 Deploy the Network Component (flannel)
//On the node01/node02 nodes
#Upload cni-plugins-linux-amd64-v0.8.6.tgz and flannel.tar to the /opt directory
cd /opt/
docker load -i flannel.tar
mkdir -p /opt/cni/bin
tar zxvf cni-plugins-linux-amd64-v0.8.6.tgz -C /opt/cni/bin
//On the master01 node
#Upload the kube-flannel.yml file to the /opt/k8s directory and deploy the CNI network
cd /opt/k8s
kubectl apply -f kube-flannel.yml
kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
kube-flannel-ds-4t9r8 1/1 Running 0 15s
kube-flannel-ds-ddpn4 1/1 Running 0 15s
kubectl get nodes
NAME STATUS ROLES AGE VERSION
192.168.80.30 Ready <none> 79m v1.20.11
192.168.80.35 Ready <none> 79m v1.20.11
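On a node, the flannel overlay can be inspected (a quick check, assuming the default vxlan backend):
ip -d addr show flannel.1        #the vxlan device created by flanneld
cat /run/flannel/subnet.env      #the subnet leased to this node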
kube-flannel.yml file contents
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
rules:
- apiGroups: ['extensions']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: flannel
namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
hostNetwork: true
priorityClassName: system-node-critical
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni
image: quay.io/coreos/flannel:v0.14.0
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.14.0
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN", "NET_RAW"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
2.7 Multi-Master Deployment
Prepare the new host: run all of the steps in section 2.1 on master02 first.
#Copy the certificate files and each master component's configuration and systemd unit files from master01 to master02
scp -r /opt/etcd/ root@192.168.80.25:/opt/
scp -r /opt/kubernetes/ root@192.168.80.25:/opt
scp /usr/lib/systemd/system/{kube-apiserver,kube-controller-manager,kube-scheduler}.service root@192.168.80.25:/usr/lib/systemd/system/
cd ~
scp -r .kube/ master02:/root
#Modify the IPs in the kube-apiserver configuration file (operate on master02)
vim /opt/kubernetes/cfg/kube-apiserver
KUBE_APISERVER_OPTS="--logtostderr=true \
--v=4 \
--etcd-servers=https://192.168.80.20:2379,https://192.168.80.30:2379,https://192.168.80.35:2379 \
--bind-address=192.168.80.25 \ #modified
--secure-port=6443 \
--advertise-address=192.168.80.25 \ #modified
......
#Create command symlinks
ln -s /opt/kubernetes/bin/* /usr/local/bin
#Start each service and enable it at boot
systemctl enable --now kube-apiserver.service
systemctl enable --now kube-controller-manager.service
systemctl enable --now kube-scheduler.service
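Because /root/.kube was copied from master01, kubectl on master02 should see the same cluster right away (a simple verification, not in the original):
kubectl get nodes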
2.8 Set Up nginx and Keepalived on lb01 and lb02
Configure the load-balancer cluster as an active/standby pair (nginx provides the load balancing, keepalived provides the failover).
###The steps on lb01 and lb02 are identical apart from the individual IPs and settings noted below
###Machine initialization
systemctl disable --now firewalld.service
Removed symlink /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
setenforce 0
hostnamectl set-hostname lb01    #set lb02 on the backup node
su
###Configure the nginx yum repository
cat > /etc/yum.repos.d/nginx.repo << 'EOF'
[nginx]
name=nginx repo
baseurl=http://nginx.org/packages/centos/7/$basearch/
gpgcheck=0
EOF
yum install nginx -y
###Modify the nginx configuration file to add the load-balancing setup
vim /etc/nginx/nginx.conf
events {
worker_connections 1024;
}
#Add the following stream block
stream {
log_format main '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';
access_log /var/log/nginx/k8s-access.log main;
upstream k8s-apiserver {
server 192.168.80.20:6443;
server 192.168.80.25:6443;
}
server {
listen 6443;
proxy_pass k8s-apiserver;
}
}
http {
......
###Start the nginx service; it should listen on port 6443
nginx -t
systemctl start nginx
systemctl enable nginx
netstat -natp | grep nginx
###Install and configure keepalived
yum install keepalived -y
vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
# Notification email addresses (unchanged)
notification_email {
acassen@firewall.loc
failover@firewall.loc
sysadmin@firewall.loc
}
# Sender email address
notification_email_from Alexandre.Cassen@firewall.loc
smtp_server 127.0.0.1 ##changed to 127.0.0.1
smtp_connect_timeout 30
router_id LB01 #LB01 on the lb01 node, LB02 on the lb02 node
}
#Add a periodically executed check script
vrrp_script check_nginx {
script "/etc/nginx/check_nginx.sh" #path of the script that checks whether nginx is alive
}
vrrp_instance VI_1 {
state MASTER #MASTER on the lb01 node, BACKUP on the lb02 node
interface ens33 #NIC name, ens33
virtual_router_id 51 #VRID; must be the same on both nodes
priority 100 #100 on the lb01 node, 90 on the lb02 node
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
192.168.80.100/24 #the VIP
}
track_script {
check_nginx #the script declared in vrrp_script
}
}
Delete the rest of the default configuration.
### Configure the nginx check script
vim /etc/nginx/check_nginx.sh
#!/bin/bash
#egrep -cv "grep|$$" filters out lines containing "grep" and the current shell's PID ($$)
count=$(ps -ef | grep nginx | egrep -cv "grep|$$")
if [ "$count" -eq 0 ];then
systemctl stop keepalived
fi
chmod +x /etc/nginx/check_nginx.sh
###Start keepalived (the nginx service must be started first)
systemctl start keepalived
systemctl enable keepalived
ip a #check that the VIP has been assigned
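A failover test (a sketch; the steps and outcomes assume the configuration above):
#On lb01: stop nginx; check_nginx.sh then stops keepalived, releasing the VIP
systemctl stop nginx
ip a | grep 192.168.80.100       #the VIP should be gone from lb01
#On lb02: the BACKUP instance takes over; the VIP should now appear here
ip a | grep 192.168.80.100
#Recover lb01: start nginx first, then keepalived; with priority 100 > 90 the VIP fails back
systemctl start nginx
systemctl start keepalived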
2.9 Modify the Node Configuration
###On all node machines, change the server address in the bootstrap.kubeconfig, kubelet.kubeconfig and kube-proxy.kubeconfig files to the VIP
cd /opt/kubernetes/cfg/
vim bootstrap.kubeconfig
server: https://192.168.80.100:6443
vim kubelet.kubeconfig
server: https://192.168.80.100:6443
vim kube-proxy.kubeconfig
server: https://192.168.80.100:6443
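The three edits can also be done in one sed pass (a sketch, assuming the files currently point at https://192.168.80.20:6443):
cd /opt/kubernetes/cfg/
sed -i 's#server: https://192.168.80.20:6443#server: https://192.168.80.100:6443#' bootstrap.kubeconfig kubelet.kubeconfig kube-proxy.kubeconfig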
###Restart the kubelet and kube-proxy services
systemctl restart kubelet.service
systemctl restart kube-proxy.service
###Check the access log on lb01
tail /var/log/nginx/k8s-access.log
2.10 Testing
###Create a pod as a test
[root@master02 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
192.168.80.30 Ready <none> 2d2h v1.20.11
192.168.80.35 Ready <none> 2d2h v1.20.11
[root@master02 ~]# kubectl run nginx --image=nginx
pod/nginx created
[root@master02 ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx 0/1 ContainerCreating 0 23s
[root@master02 ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx 1/1 Running 0 40s
[root@master02 ~]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx 1/1 Running 0 50s 10.244.1.2 192.168.80.30 <none> <none>
[root@master02 ~]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx 1/1 Running 0 62s 10.244.1.2 192.168.80.30 <none> <none>
### Access the service from the node it runs on (use the pod IP from the output above)
curl 10.244.1.2
### Check the container log from master01
kubectl logs nginx
2.11 Deploy the Dashboard UI
The Dashboard UI is the Kubernetes management interface; it lets you manage the cluster's nodes graphically.
###Create the namespace
kubectl create ns kubernetes-dashboard
###Create the Dashboard RBAC objects
kubectl apply -f http://down.i4t.com/rbac-dashboard.yaml
###Create the Dashboard
kubectl apply -f http://down.i4t.com/dashboard-k8s.yaml
###Check the pod and svc
kubectl get pod,svc -n kubernetes-dashboard
###Check the secret
kubectl -n kube-system get secret|grep admin-token
###Get the token
kubectl get secret -n kube-system
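To print the login token itself (a sketch; the admin-token name pattern comes from the grep above):
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | awk '/admin-token/{print $1}')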
Original: https://www.cnblogs.com/Canyun-blogs/p/16477918.html
Author: 残-云
Title: K8s-二进制安装