1. Environment Preparation
Before starting, the machines used to deploy the Kubernetes cluster must meet the following requirements (a quick verification sketch follows the list):
- One or more machines running CentOS 7.x (x86_64);
- Hardware: 2 GB of RAM or more, 2 CPUs or more, and at least 30 GB of disk space;
- Full network connectivity between all machines in the cluster;
- Internet access, needed for pulling images;
- Swap disabled.
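The following commands can be run on each machine to confirm it meets these requirements (a minimal sketch; the thresholds are the ones listed above):
# Run on every machine
nproc              # CPU count, expect 2 or more
free -h            # total RAM, expect 2 GB or more; the Swap row should read 0 once swap is disabled
df -h /            # disk space on the root filesystem, expect 30 GB or more
swapon --show      # prints nothing when swap is fully disabled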
Host details

| IP | Hostname | Notes |
|---|---|---|
| 192.168.32.128 | k8s-master | Master, control plane, Docker |
| 192.168.32.129 | k8s-node1 | Worker node, Docker |
| 192.168.32.130 | k8s-node2 | Worker node, Docker |
2. Preparing the Installation Environment (run on all three machines)
2.1 Set the hostname
# Run on each of the three machines respectively:
hostnamectl set-hostname k8s-master
hostnamectl set-hostname k8s-node1
hostnamectl set-hostname k8s-node2
2.2 Edit the hosts file (run on all three machines)
# hostname-to-IP mapping for each role
cat > /etc/hosts << EOF
192.168.32.128 k8s-master
192.168.32.129 k8s-node1
192.168.32.130 k8s-node2
EOF
2.3 Disable the firewall and SELinux (run the same commands on all three machines)
# Disable the firewall
systemctl stop firewalld
systemctl disable firewalld
# Disable SELinux
sed -i 's/enforcing/disabled/' /etc/selinux/config  # permanent
setenforce 0  # temporary
# Disable swap:
swapoff -a  # temporary
vim /etc/fstab  # permanent: comment out the swap line
# /dev/mapper/centos-swap swap swap defaults 0 0  # just comment out (disable) this line
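If you prefer not to edit /etc/fstab by hand, a sed one-liner can comment out the swap entry. This is only a sketch, so inspect /etc/fstab afterwards to confirm that only the swap line was touched:
# Comment out every fstab line whose type field is swap
sed -ri '/\sswap\s/s/^/#/' /etc/fstab
grep swap /etc/fstab   # the swap line should now start with '#'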
# Pass bridged IPv4 traffic to the iptables chains:
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system  # apply
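If sysctl --system reports that the net.bridge.* keys cannot be found, the br_netfilter kernel module is usually not loaded yet; loading it and making it persistent is a common fix (a sketch):
modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/k8s.conf   # load the module automatically on boot
sysctl --system                                    # re-apply
sysctl net.bridge.bridge-nf-call-iptables          # should print '= 1'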
2.4 Time synchronization (run on all three machines)
yum install ntpdate -y
ntpdate ntp1.aliyun.com
Set up a cron job for periodic automatic synchronization (on all three machines):
crontab -e
*/10 * * * * ntpdate ntp1.aliyun.com
*/12 * * * * hwclock --systohc
*/13 * * * * clock -w
2.5 Install Docker (on all three machines)
# Replace the yum repositories (on all three machines)
cd /etc/yum.repos.d/
[root@k8s-master yum.repos.d]# mkdir bak
[root@k8s-master yum.repos.d]# mv *.repo bak/
# Install the Docker CE yum repo (on all three machines)
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
# Install Docker (on all three machines)
yum -y install docker-ce
systemctl enable docker && systemctl start docker
# Configure a registry mirror to speed up image downloads
cat >/etc/docker/daemon.json << EOF
{
"registry-mirrors":["https://b9pmyelo.mirror.aliyuncs.com"]
}
EOF
systemctl restart docker
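Optionally, kubeadm prefers the kubelet and the container runtime to use the systemd cgroup driver, while Docker defaults to cgroupfs. The cluster in this guide also works with the default, but if you want to switch, a combined daemon.json could look like this (a sketch; restart Docker afterwards):
cat > /etc/docker/daemon.json << EOF
{
  "registry-mirrors": ["https://b9pmyelo.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
systemctl restart docker
docker info | grep -i 'cgroup driver'   # should now report systemd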
2.6 Add the Aliyun yum repository used to install kubeadm (all three nodes)
cat >/etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
2.7 Install kubeadm, kubelet, and kubectl (on all three nodes)
# Install a pinned version; without a version number the latest is installed
yum install -y kubelet-1.20.9 kubeadm-1.20.9 kubectl-1.20.9
systemctl enable kubelet
# What the components do
kubelet: the node agent, managed as a systemd daemon
kubeadm: the cluster deployment (bootstrapping) tool
kubectl: the Kubernetes command-line management tool
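A quick sanity check that all three nodes ended up on the same version (a minimal sketch):
kubeadm version -o short     # expect v1.20.9
kubelet --version            # expect Kubernetes v1.20.9
kubectl version --client --short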
2.8 Deploy the Kubernetes master (run on the k8s-master machine)
# Run on 192.168.32.128 (the master); the advertise address must be the master's IP
[root@k8s-master ~]# kubeadm init \
--apiserver-advertise-address=192.168.32.128 \
--image-repository registry.aliyuncs.com/google_containers \
--kubernetes-version v1.20.9 \
--service-cidr=10.96.0.0/12 \
--pod-network-cidr=10.244.0.0/16
# Parameter explanation
--apiserver-advertise-address  the address the cluster is advertised on (the master's IP)
--image-repository             the default registry k8s.gcr.io is unreachable from mainland China, so the Aliyun mirror registry is specified here
--kubernetes-version           the Kubernetes version, matching the packages installed above
--service-cidr                 the cluster-internal virtual network that serves as the unified entry point for reaching Pods (the Service network)
--pod-network-cidr             the Pod network; it must match the value in the CNI component's YAML deployed below
After the master initializes successfully, it prints the join command for adding worker nodes.
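Optionally, the control-plane images can be pulled ahead of time so that kubeadm init does not stall on downloads; this uses the same repository and version flags as above (a sketch):
kubeadm config images pull \
  --image-repository registry.aliyuncs.com/google_containers \
  --kubernetes-version v1.20.9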
2.9 Copy the kubeconfig file that kubectl uses to authenticate to the cluster into its default path
# Run on the master
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# Check the nodes; the flannel network plugin is not deployed yet, so the status is NotReady
kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master NotReady master 2m v1.20.9
2.10 Join the Kubernetes worker nodes (run on 192.168.32.129 and 192.168.32.130)
# The join command below is generated by kubeadm init; run it on each node that should join the cluster
[root@k8s-node1 ~]# kubeadm join 192.168.32.128:6443 --token esce21.q6hetwm8si29qxwn \
--discovery-token-ca-cert-hash sha256:00603a05805807501d7181c3d60b478788408cfe6cedefedb1f97569708be9c5
The default token is valid for 24 hours; once it expires it can no longer be used and a new one has to be created:
[root@k8s-master ~]# kubeadm token create --print-join-command
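To check which tokens currently exist and when they expire, and to confirm that a node actually joined, a quick sketch (run on the master):
kubeadm token list                          # lists tokens with their TTL/expiry
kubeadm token create --print-join-command   # prints a fresh, ready-to-run join command
kubectl get nodes                           # joined nodes appear here (NotReady until the CNI is deployed)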
2.11 Deploy the container network (CNI)
Flannel is used here as the Kubernetes container network solution, handling cross-host container communication.
Flannel is a network component maintained by CoreOS. It gives every Pod a globally unique IP and uses etcd to store the mapping between Pod subnets and node IPs. The flanneld daemon runs on every host and is responsible for maintaining that information and routing packets.
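Once flannel is running, flanneld writes the node's subnet lease to a local file, which is a quick way to confirm the overlay network is in place (a sketch, run on any node after this section is finished):
cat /run/flannel/subnet.env    # shows FLANNEL_NETWORK (10.244.0.0/16) and this node's FLANNEL_SUBNET
ip -d link show flannel.1      # the VXLAN interface created by flanneld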
Deploy the flannel network component with the following YAML file:
kubectl apply -f kube-flannel.yml
kubectl get pods -n kube-system
[root@k8s-master ~]# cat kube-flannel.yml
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
  - configMap
  - secret
  - emptyDir
  - hostPath
  allowedHostPaths:
  - pathPrefix: "/etc/cni/net.d"
  - pathPrefix: "/etc/kube-flannel"
  - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
- apiGroups: ['extensions']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni-plugin
        image: rancher/mirrored-flannelcni-flannel-cni-plugin:v1.2
        command:
        - cp
        args:
        - -f
        - /flannel
        - /opt/cni/bin/flannel
        volumeMounts:
        - name: cni-plugin
          mountPath: /opt/cni/bin
      - name: install-cni
        image: quay.io/coreos/flannel:v0.15.0
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.15.0
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni-plugin
        hostPath:
          path: /opt/cni/bin
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
# Deploy the network plugin
kubectl apply -f kube-flannel.yml
kubectl get pods -n kube-system
# Check whether all nodes are in the Ready state
kubectl get nodes -owide
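The DaemonSet can take a minute or two to roll out on every node; these commands wait for it and then confirm the nodes flip to Ready (a sketch):
kubectl -n kube-system rollout status ds/kube-flannel-ds
kubectl -n kube-system get pods -l app=flannel -owide
kubectl get nodes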
2.12 Deploy the official Dashboard (UI)
[root@k8s-master ~]# wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.3/aio/deploy/recommended.yaml
By default the Dashboard is only reachable from inside the cluster. Change its Service to the NodePort type to expose it externally:
[root@k8s-master ~]# vim recommended.yaml
...
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30006   # added: fixed external port 30006
  selector:
    k8s-app: kubernetes-dashboard
  type: NodePort        # added: expose the Service via a NodePort
...
[root@k8s-master ~]# kubectl apply -f recommended.yaml
[root@k8s-master ~]# kubectl get pods -n kubernetes-dashboard
NAME READY STATUS RESTARTS AGE
dashboard-metrics-scraper-6b4884c9d5-gl8nr 1/1 Running 0 13m
kubernetes-dashboard-7f99b75bf4-89cds 1/1 Running 0 13m
Access URL: https://NodeIP:30006
Create a service account and bind it to the built-in cluster-admin cluster role.
# Create the user
[root@k8s-master ~]# kubectl create serviceaccount dashboard-admin -n kube-system
# Grant the user cluster-admin permissions
[root@k8s-master ~]# kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
# Retrieve the user's token
[root@k8s-master ~]# kubectl describe secrets -n kube-system $(kubectl -n kube-system get secret | awk '/dashboard-admin/{print $1}')
Log in to the Dashboard with the token printed in the output.
Open https://192.168.32.128:30006 in a browser and sign in with the token.
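A quick way to confirm the NodePort change took effect and to print just the token (a sketch; the secret name is generated, hence the awk lookup):
kubectl -n kubernetes-dashboard get svc kubernetes-dashboard   # TYPE should be NodePort, PORT(S) 443:30006/TCP
kubectl -n kube-system describe secret \
  $(kubectl -n kube-system get secret | awk '/dashboard-admin/{print $1}') | awk '/^token:/{print $2}'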
2.13 Zero-downtime rolling releases
Kubernetes supports a feature called Rolling Update, which lets you upgrade a deployed application
almost seamlessly, i.e. the update completes without taking the externally facing service down.
What is a rolling update?
To keep serving users without interruption while an application is upgraded, Kubernetes updates Pods in sequence, a configurable number at a time, instead of stopping and replacing all Pods at once. This lets releases and rollbacks happen without interrupting the service; the rollout can also be driven and monitored from the command line, as sketched below.
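A minimal sketch of the relevant kubectl rollout commands, using the Deployment created later in this section (deployment-hospital in the encop namespace):
kubectl rollout status deployment/deployment-hospital -n encop     # block until the new ReplicaSet is fully rolled out
kubectl rollout history deployment/deployment-hospital -n encop    # list recorded revisions
kubectl rollout undo deployment/deployment-hospital -n encop       # roll back to the previous revision if the release misbehaves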
# Rolling update example
Upload the test project.
Pull the base image python:3.7:
docker pull python:3.7
Package the application with the following Dockerfile:
vim Dockerfile
FROM python:3.7
VOLUME /tmp
ADD . /work
WORKDIR /work
RUN rm -rf simpleui
RUN /usr/local/bin/python -m pip install --upgrade pip
RUN pip3 install -i https://mirrors.aliyun.com/pypi/simple/ -r requirements.txt
RUN pip3 install django-simpleui -U
EXPOSE 8080
ENTRYPOINT ["python", "manage.py", "runserver", "0.0.0.0:8080"]
Build the image from the Dockerfile to produce a local artifact image (note the trailing dot, which refers to the Dockerfile in the current directory):
[root@k8s-master hospital_manager]# docker build -t hospital:v1.23.6.20.1 .
The hospital.yaml file, explained:
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: hospital
  name: deployment-hospital          # Deployment name
  namespace: encop                   # namespace
spec:
  replicas: 4                        # number of Pod replicas
  progressDeadlineSeconds: 600       # how many seconds to wait before the Deployment is considered stuck; if it is still stuck, it is marked as not progressing
  minReadySeconds: 10                # during a rolling upgrade, a Pod is considered ready and usable 10s after it starts
  selector:
    matchLabels:
      app: hospital
  strategy:
    rollingUpdate:                   # rolling update settings
      maxSurge: 1                    # maximum number of extra Pods that may be created above the desired count during the update; 1 means a new Pod is created before an old one is deleted, and so on; can be an absolute number or a percentage (default 25%)
      maxUnavailable: 25%            # maximum number of Pods that may be unavailable during the update
    type: RollingUpdate
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: hospital
    spec:
      containers:
      - image: test/hospital:v1.23.6.20.1   # image version
        imagePullPolicy: Never              # use the local image only; never pull from a remote registry
        name: hospital
        resources: {}
        volumeMounts:
        - name: time-config
          mountPath: /etc/localtime
          readOnly: true
      volumes:
      - name: time-config
        hostPath:
          path: /etc/localtime
status: {}
---
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    app: hospital
  name: hospital
  namespace: encop
spec:
  ports:
  - nodePort: 32121
    port: 8080
    protocol: TCP
    targetPort: 8080
  selector:
    app: hospital
  type: NodePort
  sessionAffinity: ClientIP          # keep session affinity when there are multiple Pods
status:
  loadBalancer: {}
Apply the manifest:
kubectl apply -f hospital.yaml
[root@k8s-master v1]# kubectl get pods -n encop -owide
Check the Pods' version tag, currently v1.23.6.20.1, and watch them:
[root@k8s-master v1]# kubectl get pod -w -n encop
On node1 (192.168.32.129), use a small script to check whether the service is reachable:
[root@k8s-node1 ~]# cat check_serv.sh
#!/bin/bash
# Poll the NodePort service every 2 seconds and report whether it responds
while true; do
    curl -m 10 -s -o /dev/null 192.168.32.128:32121
    if [ $? -eq 0 ]; then
        echo "Service OK!"
    else
        echo "Service NOT OK!"
    fi
    sleep 2
done
Right now the service is healthy,
and the page opens normally in a browser.
Change the application title to v2; after the update the page should show "v2".
Rebuild the image:
[root@k8s-master hospital_manager]# docker build -t test/hospital:v1.23.6.20.2 .
Before the rolling update starts, the service is reachable the whole time.
Watch the Pods to monitor the update:
[root@k8s-master v1]# kubectl get pod -w -n encop
Run the update, upgrading the image from v1.23.6.20.1 to v1.23.6.20.2:
kubectl set image deployment/deployment-hospital hospital=test/hospital:v1.23.6.20.2 -n encop
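To follow the rollout and confirm the Deployment ended up on the new image (a sketch):
kubectl rollout status deployment/deployment-hospital -n encop
kubectl -n encop get deployment deployment-hospital \
  -o jsonpath='{.spec.template.spec.containers[0].image}{"\n"}'    # should print test/hospital:v1.23.6.20.2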
The service stays reachable throughout the deployment.
The page title changed, the service never went down, and the zero-downtime rolling upgrade succeeded; the running version is now v1.23.6.20.2.
Reference: http://idcsec.com/2019/03/05/kubernetes%E6%BB%9A%E5%8A%A8%E6%9B%B4%E6%96%B0%EF%BC%88%E6%97%A0%E4%B8%AD%E6%96%AD%E5%B9%B3%E6%BB%91%E5%8F%91%E5%B8%83%EF%BC%89/
Additional note: if the service takes a long time to start, add a readiness probe for a more fine-grained, safer rollout.
# Readiness probe
readinessProbe:
  # Check with an HTTP GET request
  httpGet:
    # Path to check
    # on Windows use /sys-value-server/one-vo/12
    # on k8s use /sys-value-server/one-vo/12
    path: /sys-value-server/one-vo/12   # only once this URL returns 200 is the Pod considered ready and traffic routed to it
    # Application port
    port: 42130
  # Initial delay of 20 seconds
  initialDelaySeconds: 20
  # Check every 10 seconds
  periodSeconds: 10
  # Timeout
  timeoutSeconds: 5
  # Number of consecutive successful checks required
  successThreshold: 1
  # Number of consecutive failed checks tolerated
  failureThreshold: 3