Binary installation - Docker runtime
1. Server planning
Role | IP | Components
---|---|---
k8s-master1 | 192.168.11.111 | kube-apiserver, kube-controller-manager, kube-scheduler, etcd
k8s-master2 | 192.168.11.112 | kube-apiserver, kube-controller-manager, kube-scheduler, etcd
k8s-node1 | 192.168.11.113 | kubelet, kube-proxy, docker
k8s-node2 | 192.168.11.114 | kubelet, kube-proxy, docker
Load Balancer (Master) | 192.168.11.115 | keepalived, haproxy
Load Balancer (Backup) | 192.168.11.116 | keepalived, haproxy
lb | 192.168.11.100 (VIP) | -
2. System initialization
1. Set hostnames according to the plan above
# Install the dependencies required by the cluster
yum install wget sed vim tree jq psmisc net-tools telnet yum-utils device-mapper-persistent-data lvm2 git -y
hostnamectl set-hostname k8s-master1 # run on k8s-master1
hostnamectl set-hostname ...         # set the matching hostname on each remaining node
2. Disable the firewall
# Stop and disable firewalld
systemctl disable firewalld
systemctl stop firewalld
firewall-cmd --state
3. Disable SELinux
# Disable SELinux
setenforce 0 # temporary
sed -ri 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config # permanent (requires an OS reboot)
sestatus
4. Disable swap
# Turn off swap
swapoff -a # temporary
sed -ri 's/.*swap.*/#&/' /etc/fstab # permanent
echo "vm.swappiness=0" >> /etc/sysctl.conf # also set this if the system cannot be rebooted right now
sysctl -p
5. Host name and IP address resolution (run on all nodes)
# Add hosts entries [on every node]
cat > /etc/hosts << EOF
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.11.111 k8s-master1
192.168.11.112 k8s-master2
192.168.11.113 k8s-node1
192.168.11.114 k8s-node2
192.168.11.115 k8s-lb1
192.168.11.116 k8s-lb2
EOF
# /etc/hosts entries take effect immediately; no reload is required
6. Host time synchronization
# Time sync
yum install ntpdate -y
ntpdate time.windows.com
# Schedule a cron job to sync the time once an hour
# 1. Open the crontab editor
crontab -e
# 2. Press a (or i) to enter insert mode
# 3. Add the following line
0 */1 * * * ntpdate time1.aliyun.com
# 4. Save and quit with :wq
7. Install the ipvs management tools and load the modules
Install on the cluster nodes only; the load-balancer nodes (ha1, ha2) do not need this.
yum -y install ipvsadm ipset sysstat conntrack libseccomp
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF
#make the file executable, run it, and check that the modules are loaded
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack
8. Host system limits
# ulimit -SHn 65535 # temporary
# permanent:
cat <<EOF >> /etc/security/limits.conf
* soft nofile 655360
* hard nofile 655360
* soft nproc 655350
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited
EOF
9. Linux kernel upgrade
# Install perl
yum -y install perl
# Import the ELRepo GPG key
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
# Add the ELRepo repository (yum repo)
yum -y install https://www.elrepo.org/elrepo-release-7.0-4.el7.elrepo.noarch.rpm
# Install the kernel-lt package (ml = mainline, lt = long-term support)
yum --enablerepo="elrepo-kernel" -y install kernel-lt.x86_64
# Make the newly installed kernel the default boot entry
grub2-set-default 0
# Regenerate the grub configuration
grub2-mkconfig -o /boot/grub2/grub.cfg
# Reboot afterwards so the new kernel is actually used
10. Enable IP forwarding and bridge netfilter
- Required on all hosts.
Load the br_netfilter module and let iptables see bridged IPv4/IPv6 traffic, so that containers in the cluster can communicate properly.
#kernel parameters for Kubernetes (k8s.conf)
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
vm.swappiness = 0
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl =15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF
#load br_netfilter first, otherwise the net.bridge.* keys above cannot be applied
modprobe br_netfilter
#check that it is loaded
lsmod | grep br_netfilter
#apply the settings
sysctl --system
11. Passwordless SSH (optional)
Purpose: lets the servers copy files to each other without password prompts. It is not strictly part of the cluster setup and can be skipped.
#generate a key pair (run on k8s-master1)
ssh-keygen
#copy the public key to every server you will push files to (per the plan above)
ssh-copy-id root@k8s-master2
ssh-copy-id root@k8s-node1
ssh-copy-id root@k8s-node2
#afterwards, try logging in to one of the servers to confirm it works
ssh root@k8s-master2
3. ETCD cluster deployment
See the document kubernetes2-binary-system(ca-etcd-apiserver).md.
4. Load balancer installation
Run on ha1 and ha2.
1. Install haproxy and keepalived
yum -y install haproxy keepalived
2. HAProxy configuration
Note: the k8s-master backend below must list the IPs of all master nodes.
cat >/etc/haproxy/haproxy.cfg<<"EOF"
global
maxconn 2000
ulimit-n 16384
log 127.0.0.1 local0 err
stats timeout 30s
defaults
log global
mode http
option httplog
timeout connect 5000
timeout client 50000
timeout server 50000
timeout http-request 15s
timeout http-keep-alive 15s
frontend monitor-in
bind *:33305
mode http
option httplog
monitor-uri /monitor
frontend k8s-master
bind 0.0.0.0:6443
bind 127.0.0.1:6443
mode tcp
option tcplog
tcp-request inspect-delay 5s
default_backend k8s-master
backend k8s-master
mode tcp
option tcplog
option tcp-check
balance roundrobin
default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
#list every master node here
server k8s-master1 192.168.11.111:6443 check
server k8s-master2 192.168.11.112:6443 check
EOF
3. Keepalived
The master and backup configurations differ; adjust the values accordingly.
1. Configuration file on ha1
cat >/etc/keepalived/keepalived.conf<<"EOF"
! Configuration File for keepalived
global_defs {
router_id LVS_DEVEL
script_user root
enable_script_security
}
vrrp_script chk_apiserver {
script "/etc/keepalived/check_apiserver.sh"
interval 5
weight -5
fall 2
rise 1
}
vrrp_instance VI_1 {
#this instance is the MASTER
state MASTER
interface ens33
#source IP used for VRRP multicast; use this host's own IP
mcast_src_ip 192.168.11.115
virtual_router_id 51
#priority 100 is higher than the backup's 99
priority 100
advert_int 2
authentication {
auth_type PASS
auth_pass K8SHA_KA_AUTH
}
virtual_ipaddress {
#the planned virtual IP
192.168.11.100
}
#health-check script for this node
track_script {
#name of the vrrp_script defined above (chk_apiserver)
chk_apiserver
}
}
EOF
2. Configuration file on ha2
cat >/etc/keepalived/keepalived.conf<<"EOF"
! Configuration File for keepalived
global_defs {
router_id LVS_DEVEL
script_user root
enable_script_security
}
vrrp_script chk_apiserver {
script "/etc/keepalived/check_apiserver.sh"
interval 5
weight -5
fall 2
rise 1
}
vrrp_instance VI_1 {
state BACKUP
interface ens33
mcast_src_ip 192.168.11.116
virtual_router_id 51
priority 99
advert_int 2
authentication {
auth_type PASS
auth_pass K8SHA_KA_AUTH
}
virtual_ipaddress {
192.168.11.100
}
#health-check script for this node
track_script {
#name of the vrrp_script defined above (chk_apiserver)
chk_apiserver
}
}
EOF
4. Health check script
Configure on both ha1 and ha2.
1. Create the check script
cat > /etc/keepalived/check_apiserver.sh <<"EOF"
#!/bin/bash
err=0
for k in $(seq 1 3)
do
check_code=$(pgrep haproxy)
if [[ $check_code == "" ]]; then
err=$(expr $err + 1)
sleep 1
continue
else
err=0
break
fi
done
if [[ $err != "0" ]]; then
echo "systemctl stop keepalived"
/usr/bin/systemctl stop keepalived
exit 1
else
exit 0
fi
EOF
2. Make the script executable
chmod +x /etc/keepalived/check_apiserver.sh
5. Start the services and verify
systemctl daemon-reload
systemctl enable --now haproxy
systemctl enable --now keepalived
#check the service status
systemctl status keepalived haproxy
#check whether the virtual IP has been assigned
ip address show
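Optionally, verify that the VIP actually fails over before moving on. This is only a rough sketch; it assumes the interface name ens33 and the VIP 192.168.11.100 from the keepalived configuration above:
#on ha1: stopping haproxy makes check_apiserver.sh stop keepalived, releasing the VIP
systemctl stop haproxy
#on ha2: the VIP should now show up here
ip address show ens33 | grep 192.168.11.100
#on ha1: restore both services; the VIP should move back because ha1 has the higher priority
systemctl start haproxy && systemctl start keepalived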
5. Deploy the Master components
Run on master1 and distribute the results to the other nodes that need them.
1. Download the Kubernetes binaries
1. Create the working directories
#temporary staging directory: collect everything needed here, then copy it into the real working directories and distribute it to the masters and worker nodes
mkdir -vp /usr/local/kubernetes/k8s
#real working directories
mkdir -vp /opt/kubernetes/{ssl,cfg,logs}
cd /usr/local/kubernetes/k8s
#log directory
mkdir -p /var/log/kubernetes
2. Download the packages
#release page
#https://github.com/kubernetes/kubernetes/releases
#wget https://dl.k8s.io/v1.21.10/kubernetes-server-linux-amd64.tar.gz
#wget https://dl.k8s.io/v1.27.3/kubernetes-server-linux-amd64.tar.gz
wget https://dl.k8s.io/v1.28.0/kubernetes-server-linux-amd64.tar.gz
#extract
tar -xvf kubernetes-server-linux-amd64.tar.gz
#enter the binaries directory
cd kubernetes/server/bin/
#copy the binaries into the system executable path
\cp -R kube-apiserver kube-controller-manager kube-scheduler kubectl kubelet kube-proxy /usr/local/bin/
3. Distribute the packages
To the master nodes:
#create the directory on the master node
mkdir -vp /usr/local/bin
#copy the files
scp kube-apiserver kube-controller-manager kube-scheduler kubectl k8s-master2:/usr/local/bin/
To the worker (node) machines:
#create the directory on the node
mkdir -vp /usr/local/bin
#distribute kubelet and kube-proxy to the worker nodes
for i in k8s-node1 k8s-node2;do scp kubelet kube-proxy $i:/usr/local/bin;done
4. Generate token.csv
Once TLS is enabled on the apiserver, kubelet and kube-proxy on the node machines must present valid CA-signed certificates to talk to kube-apiserver. Issuing client certificates by hand becomes a lot of work when there are many nodes and makes scaling the cluster more complex. To simplify this, Kubernetes provides TLS Bootstrapping: kubelet starts with a low-privilege bootstrap user, requests a certificate from the apiserver, and the kubelet certificate is then signed dynamically by the apiserver.
cat > /opt/kubernetes/cfg/token.csv << EOF
$(head -c 16 /dev/urandom | od -An -t x | tr -d ' '),kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
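The file should contain a single line in the form token,user,uid,"group". A quick sanity check (the token value below is just an illustrative example; yours will be a different random string):
cat /opt/kubernetes/cfg/token.csv
#expected shape: 0fb61c46f8991b718eb38d27b605b008,kubelet-bootstrap,10001,"system:kubelet-bootstrap"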
2. Deploy kube-apiserver
1. Prepare the apiserver certificate request
cd /usr/local/kubernetes/etcd
#1. Sign a kube-apiserver HTTPS certificate with the self-managed CA
cat > kube-apiserver-csr.json << "EOF"
{
"CN": "kubernetes",
"hosts": [
"127.0.0.1",
"192.168.11.111",
"192.168.11.112",
"192.168.11.113",
"192.168.11.114",
"192.168.11.115",
"192.168.11.116",
"192.168.11.100",
"10.96.0.1",
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "kubemsb",
"OU": "CN"
}
]
}
EOF
2. Generate the apiserver certificate
#generate the certificate files (produces kube-apiserver-key.pem, kube-apiserver.pem, kube-apiserver.csr)
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-apiserver-csr.json | cfssljson -bare kube-apiserver
#directory for the apiserver TLS files
mkdir -vp /opt/kubernetes/ssl/api-server
#copy the generated certificates into the working directory
\cp -R ca*.pem kube-apiserver*.pem /opt/kubernetes/ssl/api-server
3. kube-apiserver.conf configuration file
#apiserver
cat > /opt/kubernetes/cfg/kube-apiserver.conf << EOF
KUBE_APISERVER_OPTS="--enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \\
--anonymous-auth=false \\
--bind-address=192.168.11.111 \\
--advertise-address=192.168.11.111 \\
--secure-port=6443 \\
--authorization-mode=Node,RBAC \\
--runtime-config=api/all=true \\
--enable-bootstrap-token-auth \\
--service-cluster-ip-range=10.96.0.0/16 \\
--token-auth-file=/opt/kubernetes/cfg/token.csv \\
--service-node-port-range=1-32767 \\
--tls-cert-file=/opt/kubernetes/ssl/api-server/kube-apiserver.pem \\
--tls-private-key-file=/opt/kubernetes/ssl/api-server/kube-apiserver-key.pem \\
--client-ca-file=/opt/kubernetes/ssl/api-server/ca.pem \\
--kubelet-client-certificate=/opt/kubernetes/ssl/api-server/kube-apiserver.pem \\
--kubelet-client-key=/opt/kubernetes/ssl/api-server/kube-apiserver-key.pem \\
--service-account-key-file=/opt/kubernetes/ssl/api-server/ca-key.pem \\
--service-account-signing-key-file=/opt/kubernetes/ssl/api-server/ca-key.pem \\
--service-account-issuer=api \\
--etcd-cafile=/opt/etcd/ssl/ca.pem \\
--etcd-certfile=/opt/etcd/ssl/etcd.pem \\
--etcd-keyfile=/opt/etcd/ssl/etcd-key.pem \\
--etcd-servers=https://192.168.11.111:2379,https://192.168.11.112:2379 \\
--enable-swagger-ui=true \\
--allow-privileged=true \\
--apiserver-count=3 \\
--audit-log-maxage=30 \\
--audit-log-maxbackup=3 \\
--audit-log-maxsize=100 \\
--audit-log-path=/var/log/kube-apiserver-audit.log \\
--event-ttl=1h \\
--v=4"
EOF
4. kube-apiserver.service unit file
cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=etcd.service
Wants=etcd.service
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-apiserver.conf
ExecStart=/usr/local/bin/kube-apiserver $KUBE_APISERVER_OPTS
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
Make sure the EnvironmentFile exists and actually defines $KUBE_APISERVER_OPTS before starting.
5. Distribute to the other master nodes
Note: update the IP addresses in kube-apiserver.conf on each target master.
#create the directories on the target master
mkdir -vp /opt/kubernetes/cfg/
mkdir -vp /opt/kubernetes/ssl/api-server
#copy all the files to the target server
scp -r /opt/kubernetes/cfg/ k8s-master2:/opt/kubernetes/cfg/
scp -r /usr/lib/systemd/system/kube-apiserver.service k8s-master2:/usr/lib/systemd/system/
scp -r /opt/kubernetes/ssl/api-server/* k8s-master2:/opt/kubernetes/ssl/api-server
6. Start kube-apiserver
systemctl daemon-reload
systemctl enable --now kube-apiserver
systemctl status kube-apiserver
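Before continuing, you can probe the apiserver directly with curl. This is only a sketch: it reuses the apiserver certificate as a client certificate, which works only if the ca-config.json profile includes client auth; if it does not, skip this and rely on kubectl cluster-info after the kubectl step instead:
curl --cacert /opt/kubernetes/ssl/api-server/ca.pem \
  --cert /opt/kubernetes/ssl/api-server/kube-apiserver.pem \
  --key /opt/kubernetes/ssl/api-server/kube-apiserver-key.pem \
  https://192.168.11.111:6443/healthz
#expected output: ok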
3. Deploy kubectl
1. Generate the kubectl (admin) certificate
#create the directory for the kubectl certificate files
mkdir -vp /opt/kubernetes/ssl/kubectl
#switch to the directory holding the CA files
cd /usr/local/kubernetes/etcd/
cat > admin-csr.json << "EOF"
{
"CN": "admin",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "system:masters",
"OU": "system"
}
]
}
EOF
Notes:
kube-apiserver later uses RBAC to authorize client requests (kubelet, kube-proxy, Pods, and so on);
kube-apiserver ships with predefined RBAC bindings, for example cluster-admin, which binds the group system:masters to the cluster-admin role, a role that grants access to every apiserver API;
O sets the certificate's group to system:masters, so a client presenting this certificate is authenticated (it is signed by the CA) and, because system:masters is pre-authorized, it is allowed to call every API;
Note:
This admin certificate is later used to generate the administrator kubeconfig. In general RBAC is the recommended way to control access: Kubernetes treats the certificate's CN field as the User and the O field as the Group;
"O": "system:masters" must be exactly system:masters, otherwise the later kubectl commands (including kubectl create clusterrolebinding) will not be authorized.
2. Generate the admin certificate for kubectl
#generate the certificate files
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
#list the generated files
ls admin*.pem ca*.pem
admin-key.pem admin.pem ca-key.pem ca.pem
#copy the generated certificates into the working directory
\cp -R ca*.pem admin*.pem /opt/kubernetes/ssl/kubectl/
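To confirm the identity embedded in the certificate (the CN becomes the RBAC user and O the group, as explained above), you can inspect its subject; a small sketch:
openssl x509 -in admin.pem -noout -subject
#the subject should contain CN = admin and O = system:masters (plus the C/ST/L/OU fields from admin-csr.json)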
3. Generate the kubeconfig file
kube.config is the kubectl configuration file; it contains everything needed to reach the apiserver: the apiserver address, the CA certificate, and the client certificate.
Important: double-check the IP and file names below.
#run the following in /usr/local/kubernetes/etcd
cd /usr/local/kubernetes/etcd/
#the server address here is the VIP
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.11.100:6443 --kubeconfig=kube.config
kubectl config set-credentials admin --client-certificate=admin.pem --client-key=admin-key.pem --embed-certs=true --kubeconfig=kube.config
kubectl config set-context kubernetes --cluster=kubernetes --user=admin --kubeconfig=kube.config
kubectl config use-context kubernetes --kubeconfig=kube.config
4. Install the kubectl config and bind the cluster role
#create the .kube directory under root
mkdir ~/.kube
#copy the kube.config generated above into it
\cp -R kube.config ~/.kube/config
#bind the cluster role, authenticating with /root/.kube/config
kubectl create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user kubernetes --kubeconfig=/root/.kube/config
5. Export the config into the environment
export KUBECONFIG=$HOME/.kube/config
6. Sync the config to the other master nodes
Note: create /root/.kube on each additional master (here k8s-master2) first, then copy the file over; after that the kubectl client works there as well.
# on the other master, create the directory
# mkdir /root/.kube
scp /root/.kube/config k8s-master2:/root/.kube/
#export it into the environment there as well
export KUBECONFIG=$HOME/.kube/config
7. Check the cluster state
#cluster info
kubectl cluster-info
#component status
kubectl get componentstatuses
#resources in all namespaces
kubectl get all --all-namespaces
8. Configure kubectl command completion (optional)
yum install -y bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
kubectl completion bash > ~/.kube/completion.bash.inc
source '/root/.kube/completion.bash.inc'
source $HOME/.bash_profile
4. Deploy kube-controller-manager
1. Prepare the kube-controller-manager certificate request
#create the working directory
mkdir -vp /opt/kubernetes/ssl/kube-controller-manager
#create the kube-controller-manager CSR file
cd /usr/local/kubernetes/etcd/
cat > kube-controller-manager-csr.json << "EOF"
{
"CN": "system:kube-controller-manager",
"key": {
"algo": "rsa",
"size": 2048
},
"hosts": [
"127.0.0.1",
"192.168.11.111",
"192.168.11.112"
],
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "system:kube-controller-manager",
"OU": "system"
}
]
}
EOF
2. Generate the kube-controller-manager certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
#copy the files into the working directory
\cp -r ca*.pem kube-controller-manager*.pem /opt/kubernetes/ssl/kube-controller-manager
3. Create kube-controller-manager.kubeconfig
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.11.100:6443 --kubeconfig=kube-controller-manager.kubeconfig
kubectl config set-credentials system:kube-controller-manager --client-certificate=kube-controller-manager.pem --client-key=kube-controller-manager-key.pem --embed-certs=true --kubeconfig=kube-controller-manager.kubeconfig
kubectl config set-context system:kube-controller-manager --cluster=kubernetes --user=system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig
kubectl config use-context system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig
## copy the generated kubeconfig into the working directory
\cp -R kube-controller-manager.kubeconfig /opt/kubernetes/cfg/kube-controller-manager.kubeconfig
4. Create the kube-controller-manager.conf configuration file
cat > /opt/kubernetes/cfg/kube-controller-manager.conf << "EOF"
KUBE_CONTROLLER_MANAGER_OPTS="--secure-port=10257 \
--bind-address=127.0.0.1 \
--kubeconfig=/opt/kubernetes/cfg/kube-controller-manager.kubeconfig \
--service-cluster-ip-range=10.96.0.0/16 \
--cluster-name=kubernetes \
--cluster-signing-cert-file=/opt/kubernetes/ssl/kube-controller-manager/ca.pem \
--cluster-signing-key-file=/opt/kubernetes/ssl/kube-controller-manager/ca-key.pem \
--allocate-node-cidrs=true \
--cluster-cidr=10.244.0.0/16 \
--root-ca-file=/opt/kubernetes/ssl/kube-controller-manager/ca.pem \
--service-account-private-key-file=/opt/kubernetes/ssl/kube-controller-manager/ca-key.pem \
--leader-elect=true \
--feature-gates=RotateKubeletServerCertificate=true \
--controllers=*,bootstrapsigner,tokencleaner \
--tls-cert-file=/opt/kubernetes/ssl/kube-controller-manager/kube-controller-manager.pem \
--tls-private-key-file=/opt/kubernetes/ssl/kube-controller-manager/kube-controller-manager-key.pem \
--use-service-account-credentials=true \
--v=2"
EOF
5. Create the kube-controller-manager.service unit
cat > /usr/lib/systemd/system/kube-controller-manager.service << "EOF"
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-controller-manager.conf
ExecStart=/usr/local/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
6. Sync to the other master nodes
#on the other masters, create the directory first
#mkdir -vp /opt/kubernetes/ssl/kube-controller-manager
#copy all the files to the target server
scp -r /opt/kubernetes/cfg/kube-controller-manager* k8s-master2:/opt/kubernetes/cfg/
scp -r /usr/lib/systemd/system/kube-controller-manager.service k8s-master2:/usr/lib/systemd/system/
scp -r /opt/kubernetes/ssl/kube-controller-manager/* k8s-master2:/opt/kubernetes/ssl/kube-controller-manager
7. Start kube-controller-manager
systemctl daemon-reload
systemctl enable --now kube-controller-manager
systemctl status kube-controller-manager
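To confirm kube-controller-manager is healthy and has acquired leadership, a quick check (a sketch; 10257 is the secure port set above, and the lease name is the default used by leader election):
curl -sk https://127.0.0.1:10257/healthz
#expected output: ok
kubectl -n kube-system get lease kube-controller-manager
#the HOLDER column should show one of the master hostnames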
5. Deploy kube-scheduler
1. Prepare the kube-scheduler certificate request
#create the certificate working directory
mkdir -vp /opt/kubernetes/ssl/kube-scheduler
cd /usr/local/kubernetes/etcd
cat > kube-scheduler-csr.json << "EOF"
{
"CN": "system:kube-scheduler",
"hosts": [
"127.0.0.1",
"192.168.11.111",
"192.168.11.112"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "system:kube-scheduler",
"OU": "system"
}
]
}
EOF
2. Generate the kube-scheduler certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler
#copy the generated certificate files into the working directory
\cp -R ca*.pem kube-scheduler*.pem /opt/kubernetes/ssl/kube-scheduler
3. Create kube-scheduler.kubeconfig
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.11.100:6443 --kubeconfig=kube-scheduler.kubeconfig
kubectl config set-credentials system:kube-scheduler --client-certificate=kube-scheduler.pem --client-key=kube-scheduler-key.pem --embed-certs=true --kubeconfig=kube-scheduler.kubeconfig
kubectl config set-context system:kube-scheduler --cluster=kubernetes --user=system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig
kubectl config use-context system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig
#copy the generated kubeconfig into the working directory
\cp -R kube-scheduler.kubeconfig /opt/kubernetes/cfg/kube-scheduler.kubeconfig
4. Create the kube-scheduler.conf configuration file
cat > /opt/kubernetes/cfg/kube-scheduler.conf << "EOF"
KUBE_SCHEDULER_OPTS=" \
--leader-elect=true \
--kubeconfig=/opt/kubernetes/cfg/kube-scheduler.kubeconfig \
--v=2"
EOF
5. Create kube-scheduler.service
cat > /usr/lib/systemd/system/kube-scheduler.service << "EOF"
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-scheduler.conf
ExecStart=/usr/local/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
6. Distribute to the other master nodes
#on the other masters, create the directory first
#mkdir -vp /opt/kubernetes/ssl/kube-scheduler
#copy all the files to the target server
scp -r /opt/kubernetes/cfg/kube-scheduler* k8s-master2:/opt/kubernetes/cfg/
scp -r /usr/lib/systemd/system/kube-scheduler.service k8s-master2:/usr/lib/systemd/system/
scp -r /opt/kubernetes/ssl/kube-scheduler/* k8s-master2:/opt/kubernetes/ssl/kube-scheduler
7. Start kube-scheduler
systemctl daemon-reload
systemctl enable --now kube-scheduler
systemctl status kube-scheduler
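A similar check for kube-scheduler (a sketch; 10259 is the scheduler's default secure port):
curl -sk https://127.0.0.1:10259/healthz
#expected output: ok
kubectl -n kube-system get lease kube-scheduler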
6. Check the cluster state
#cluster info
kubectl cluster-info
#component status
kubectl get componentstatuses
#resources in all namespaces
kubectl get all --all-namespaces
6. Deploy the Node components
1. Install docker
1. Install docker
#1. Update yum
yum update
#2. Install the required packages
yum install -y yum-utils device-mapper-persistent-data lvm2
#3. Add the Aliyun mirror repo
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
## check installed versions
# yum list installed | grep docker
#http://mirrors.163.com/docker-ce/linux/centos/7.6/x86_64/stable/Packages/
#pick a version that matches your Kubernetes release (e.g. for k8s 1.27, docker v20.10.18 - v20.10.21)
#yum install docker-ce-20.10.20-3.el7
yum install docker-ce
docker -v
#4. Enable and start docker
systemctl daemon-reload
#enable docker at boot
systemctl enable --now docker
systemctl start docker
systemctl status docker
#to disable autostart instead:
#systemctl disable docker
2. Environment configuration
- Docker (dockershim) is deprecated as of Kubernetes 1.24; see:
https://kubernetes.io/zh-cn/docs/setup/production-environment/container-runtimes/
Note: exec-opts is required for kubelet to start and must match the "cgroupDriver" value in kubelet.json.
mkdir -p /etc/docker
#prepare docker's daemon configuration and data directory
#since k8s 1.24 the cgroup driver must be specified: native.cgroupdriver=systemd
cat > /etc/docker/daemon.json << EOF
{
"data-root":"/usr/local/dockerWorkspace/",
"exec-opts": ["native.cgroupdriver=systemd"],
"registry-mirrors" : [
"https://registry.docker-cn.com",
"http://hub-mirror.c.163.com",
"https://docker.mirrors.ustc.edu.cn",
"https://cr.console.aliyun.com"
]
}
EOF
#restart docker
systemctl restart docker
Registry mirrors used above:
1. Docker CN official mirror: https://registry.docker-cn.com
2. NetEase: http://hub-mirror.c.163.com
3. USTC: https://docker.mirrors.ustc.edu.cn
4. Aliyun console: https://cr.console.aliyun.com
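After the restart it is worth confirming that Docker picked up these settings, because the cgroup driver must match the "cgroupDriver": "systemd" value used later in kubelet.json; a quick check:
docker info --format '{{.CgroupDriver}}'
#expected: systemd
docker info --format '{{.DockerRootDir}}'
#expected: /usr/local/dockerWorkspace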
3. Install cri-dockerd
#download the package
wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.4/cri-dockerd-0.3.4-3.el7.x86_64.rpm
#install cri-dockerd
yum install cri-dockerd-0.3.4-3.el7.x86_64.rpm
#edit cri-docker.service (installed at this path)
vim /usr/lib/systemd/system/cri-docker.service
#change the ExecStart line
#ExecStart=/usr/bin/cri-dockerd --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.7 --container-runtime-endpoint fd://
ExecStart=/usr/bin/cri-dockerd --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.8 --container-runtime-endpoint fd://
#start cri-docker
systemctl enable --now cri-docker
#check its status
systemctl status cri-docker
2. Install kubelet
(The CA and cfssl live on master1, so the certificate material is generated on master1 and then distributed to the node machines.)
1. Copy the CA certificate
The node machines run kubelet, kube-proxy, and docker (or containerd).
#create the working directories
mkdir -vp /opt/kubernetes/ssl/{kubelet,kube-proxy}
\cp -R /usr/local/kubernetes/etcd/ca*.pem /opt/kubernetes/ssl/kubelet/
2. Generate the kubelet-bootstrap.kubeconfig file
#enter the working directory
cd /usr/local/kubernetes/etcd
#read the token from token.csv into a variable (echo $BOOTSTRAP_TOKEN to inspect it)
BOOTSTRAP_TOKEN=$(awk -F "," '{print $1}' /opt/kubernetes/cfg/token.csv)
#set the cluster
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.11.100:6443 --kubeconfig=kubelet-bootstrap.kubeconfig
#set the bootstrap credentials (the token)
kubectl config set-credentials kubelet-bootstrap --token=${BOOTSTRAP_TOKEN} --kubeconfig=kubelet-bootstrap.kubeconfig
#create the context
kubectl config set-context default --cluster=kubernetes --user=kubelet-bootstrap --kubeconfig=kubelet-bootstrap.kubeconfig
#use the context just created
kubectl config use-context default --kubeconfig=kubelet-bootstrap.kubeconfig
#bind a cluster role for the bootstrap user
kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=kubelet-bootstrap
#bind the node-bootstrapper role to the bootstrap user
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap --kubeconfig=kubelet-bootstrap.kubeconfig
#copy the generated kubeconfig into the working directory
\cp -R kubelet-bootstrap.kubeconfig /opt/kubernetes/cfg/
#describe the cluster-system-anonymous binding
kubectl describe clusterrolebinding cluster-system-anonymous
#describe the kubelet-bootstrap binding
kubectl describe clusterrolebinding kubelet-bootstrap
Note:
If a binding already exists, delete it first:
kubectl delete clusterrolebinding kubelet-bootstrap
3. Sync the certificate files to the node machines
Note: with all required certificates generated, copy them to the node machines. From step 4 (preparing kubelet.json) onward, the commands run on the nodes, not on k8s-master1. Remember to create the working directories on each node first.
mkdir -vp /opt/kubernetes/{cfg,ssl,bin}
mkdir -vp /opt/kubernetes/ssl/kubelet/
#copy the kubeconfig and bootstrap files
for i in k8s-node1 k8s-node2;do scp /opt/kubernetes/cfg/kubelet* $i:/opt/kubernetes/cfg/;done
#copy the ssl files
for i in k8s-node1 k8s-node2;do scp /opt/kubernetes/ssl/kubelet/* $i:/opt/kubernetes/ssl/kubelet/;done
4. Prepare kubelet.json (run on the node)
Note: "address" must be the current node's own IP, and "cgroupDriver" must stay systemd to match the container runtime's cgroup driver.
cat > /opt/kubernetes/cfg/kubelet.json << "EOF"
{
"kind": "KubeletConfiguration",
"apiVersion": "kubelet.config.k8s.io/v1beta1",
"authentication": {
"x509": {
"clientCAFile": "/opt/kubernetes/ssl/kubelet/ca.pem"
},
"webhook": {
"enabled": true,
"cacheTTL": "2m0s"
},
"anonymous": {
"enabled": false
}
},
"authorization": {
"mode": "Webhook",
"webhook": {
"cacheAuthorizedTTL": "5m0s",
"cacheUnauthorizedTTL": "30s"
}
},
"address": "192.168.11.113",
"port": 10250,
"readOnlyPort": 10255,
"cgroupDriver": "systemd",
"hairpinMode": "promiscuous-bridge",
"serializeImagePulls": false,
"clusterDomain": "cluster.local.",
"clusterDNS": ["10.96.0.2"]
}
EOF
5. Prepare the kubelet.service unit
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig
This file does not exist yet; kubelet generates it automatically after bootstrapping.
cat > /usr/lib/systemd/system/kubelet.service << "EOF"
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
#After=containerd.service
#Requires=containerd.service
#use docker.service when the runtime is docker; use containerd.service for containerd
After=docker.service
Requires=docker.service
[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/usr/local/bin/kubelet \
--bootstrap-kubeconfig=/opt/kubernetes/cfg/kubelet-bootstrap.kubeconfig \
--cert-dir=/opt/kubernetes/ssl/kubelet \
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \
--config=/opt/kubernetes/cfg/kubelet.json \
--rotate-certificates \
--container-runtime-endpoint=unix:///run/cri-dockerd.sock \
--pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.8 \
--v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
Notes:
For containerd use: --container-runtime-endpoint=unix:///run/containerd/containerd.sock
For docker (cri-dockerd) use: --container-runtime-endpoint=unix:///run/cri-dockerd.sock
In the [Unit] section, make sure After=/Requires= point at the runtime you actually use (docker vs containerd).
If image pulls fail, keep --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.8
6. Sync to the other node
#remember to change the IP address inside kubelet.json on each node
for i in k8s-node2;do scp /opt/kubernetes/cfg/kubelet.json $i:/opt/kubernetes/cfg ;done
for i in k8s-node2;do scp /usr/lib/systemd/system/kubelet.service $i:/usr/lib/systemd/system ;done
7. Create the directories and start kubelet
(Run on the node machines, not on the masters.)
mkdir -p /var/lib/kubelet
mkdir -p /var/log/kubernetes
systemctl daemon-reload
systemctl enable --now kubelet
systemctl status kubelet
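If the node does not appear in kubectl get nodes shortly after kubelet starts, check on a master whether its bootstrap CSR is still pending and approve it (the CSR name below is illustrative):
kubectl get csr
#NAME        SIGNERNAME                                    REQUESTOR           CONDITION
#node-csr-x  kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Pending
kubectl certificate approve node-csr-x
#then re-check: kubectl get nodes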
8. Troubleshooting a startup error
Aug 4 19:12:40 k8s-master03 kubelet: E0804 19:12:40.726264 21343 run.go:74] "command failed" err="failed to run Kubelet: validate service connection: CRI v1 runtime API is not implemented for endpoint \"unix:///run/containerd/containerd.sock\": rpc error: code = Unimplemented desc = unknown service runtime.v1.RuntimeService"
Check what runtime is actually running with: ps -ef|grep containerd
This error means kubelet is pointing at the containerd socket; make sure --container-runtime-endpoint is unix:///run/cri-dockerd.sock (and remove any stale runtime configuration), then restart kubelet.
3. Install kube-proxy
(The CA and cfssl live on master1, so the certificate material is generated on master1 and then distributed to the nodes.) kube-proxy provides the Service networking for Pods.
1. Create the kube-proxy certificate request
#create the working directories
mkdir -vp /opt/kubernetes/ssl/kube-proxy
mkdir -vp /opt/kubernetes/yaml
cd /usr/local/kubernetes/etcd
#CSR file
cat > kube-proxy-csr.json << "EOF"
{
"CN": "system:kube-proxy",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "kubemsb",
"OU": "CN"
}
]
}
EOF
2. Generate the certificate
#generate the certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
#copy into the working directory
\cp -R ca*.pem kube-proxy*.pem /opt/kubernetes/ssl/kube-proxy
3. Create kube-proxy.kubeconfig
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.11.100:6443 --kubeconfig=kube-proxy.kubeconfig
kubectl config set-credentials kube-proxy --client-certificate=kube-proxy.pem --client-key=kube-proxy-key.pem --embed-certs=true --kubeconfig=kube-proxy.kubeconfig
kubectl config set-context default --cluster=kubernetes --user=kube-proxy --kubeconfig=kube-proxy.kubeconfig
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
#copy the generated kubeconfig into the working directory
\cp -R kube-proxy.kubeconfig /opt/kubernetes/cfg/
4. Sync the certificate files to the node machines
Note: with the certificates generated, copy them to the nodes; the remaining kube-proxy steps run on the nodes, not on k8s-master1. Create the working directories on each node first.
mkdir -vp /opt/kubernetes/ssl/kube-proxy
mkdir -vp /opt/kubernetes/yaml
#copy the kubeconfig files
for i in k8s-node1 k8s-node2;do scp /opt/kubernetes/cfg/kube-proxy* $i:/opt/kubernetes/cfg/;done
#copy the ssl files
for i in k8s-node1 k8s-node2;do scp /opt/kubernetes/ssl/kube-proxy/* $i:/opt/kubernetes/ssl/kube-proxy/;done
5. Create the kube-proxy.yaml configuration file (run on the node)
Note:
change the IP addresses to match each node
cat > /opt/kubernetes/yaml/kube-proxy.yaml << "EOF"
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 192.168.11.113
clientConnection:
  kubeconfig: /opt/kubernetes/cfg/kube-proxy.kubeconfig
clusterCIDR: 10.244.0.0/16
healthzBindAddress: 192.168.11.113:10256
kind: KubeProxyConfiguration
metricsBindAddress: 192.168.11.113:10249
mode: "ipvs"
EOF
6. Create the kube-proxy.service unit
cat > /usr/lib/systemd/system/kube-proxy.service << "EOF"
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/usr/local/bin/kube-proxy \
--config=/opt/kubernetes/yaml/kube-proxy.yaml \
--v=2
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
7. Sync the files to the other nodes
Create the working directory on the target nodes first:
mkdir -vp /opt/kubernetes/yaml
for i in k8s-node1 k8s-node2;do scp /usr/lib/systemd/system/kube-proxy.service $i:/usr/lib/systemd/system/;done
for i in k8s-node1 k8s-node2;do scp /opt/kubernetes/yaml/kube-proxy.yaml $i:/opt/kubernetes/yaml/;done
8. Start the service
mkdir -p /var/lib/kube-proxy
systemctl daemon-reload
systemctl enable --now kube-proxy
systemctl status kube-proxy
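Because mode: "ipvs" is configured above, the proxy rules can be inspected with ipvsadm (installed during system initialization). A sketch of what to expect once Services exist:
ipvsadm -Ln
#there should at least be a virtual server entry for the kubernetes Service, e.g. TCP 10.96.0.1:443 with the master IPs as real servers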
7. Deploy the CNI network plugin (install from a master)
Choose either flannel or calico (calico is recommended).
1. Create the working directory
mkdir -vp /opt/kubernetes/systemYaml
cd /opt/kubernetes/systemYaml
2. Install flannel
- The job of a CNI plugin is to let Pod resources communicate across hosts.
It must run on every node.
Flannel, developed by CoreOS, is an overlay network for cross-host container communication. The idea is to reserve a large network in advance, give each host a slice of it, and assign every container a distinct IP from its host's slice, so that all containers appear to sit on one flat, directly connected network; underneath, packets are encapsulated and forwarded via UDP, VXLAN, host-gw and similar backends.
It therefore solves Pod-to-Pod communication across nodes.
Flannel is essentially an "overlay network": it wraps the original packets inside another network's packets for routing and forwarding. It supports udp, vxlan, host-gw, aws-vpc, gce and alloc forwarding modes; the historic default between nodes was UDP.
Its purpose is to give every container in the cluster a cluster-unique virtual IP, by re-planning how IP addresses are assigned across all nodes, so that containers on different nodes get non-overlapping addresses from one internal network and can talk to each other directly over it.
- How flannel works (pod1 on node1 talking to pod1 on node2):
1. Traffic leaves the source Pod on node1 via the host's docker0 bridge and is forwarded to the flannel interface;
2. flanneld encapsulates the Pod packet (source and destination Pod IPs) inside UDP/VXLAN;
3. Using the routing information kept in etcd, the packet is sent over the physical NIC to flanneld on node2, which decapsulates it to expose the inner Pod IPs;
4. The inner packet is then forwarded via node2's flannel interface and docker0 bridge to the destination Pod, completing the exchange.
1. Configure 10-flannel.conflist
mkdir -p /etc/cni/net.d
#download the CNI plugins if needed
#wget https://github.com/containernetworking/plugins/releases/download/v0.8.2/cni-plugins-linux-amd64-v0.8.2.tgz
cat > /etc/cni/net.d/10-flannel.conflist << EOF
{
"name": "cbr0",
"plugins": [
{
"type": "flannel",
"delegate": {
"hairpinMode": true,
"isDefaultGateway": true
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}
EOF
2. Download from GitHub (can be slow)
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
3. Or create the yaml manually (recommended)
cat > /opt/kubernetes/systemYaml/kube-flannel.yml << "EOF"
---
kind: Namespace
apiVersion: v1
metadata:
name: kube-flannel
labels:
k8s-app: flannel
pod-security.kubernetes.io/enforce: privileged
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: flannel
name: flannel
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
- apiGroups:
- networking.k8s.io
resources:
- clustercidrs
verbs:
- list
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: flannel
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: flannel
namespace: kube-flannel
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: flannel
name: flannel
namespace: kube-flannel
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-flannel-cfg
namespace: kube-flannel
labels:
tier: node
k8s-app: flannel
app: flannel
data:
cni-conf.json: |
{
"name": "cbr0",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "flannel",
"delegate": {
"hairpinMode": true,
"isDefaultGateway": true
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}
net-conf.json: |
{
"Network": "10.244.0.0/16",
"Backend": {
"Type": "vxlan"
}
}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds
namespace: kube-flannel
labels:
tier: node
app: flannel
k8s-app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
hostNetwork: true
priorityClassName: system-node-critical
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni-plugin
image: docker.io/flannel/flannel-cni-plugin:v1.2.0
command:
- cp
args:
- -f
- /flannel
- /opt/cni/bin/flannel
volumeMounts:
- name: cni-plugin
mountPath: /opt/cni/bin
- name: install-cni
image: docker.io/flannel/flannel:v0.22.1
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: docker.io/flannel/flannel:v0.22.1
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN", "NET_RAW"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: EVENT_QUEUE_DEPTH
value: "5000"
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
- name: xtables-lock
mountPath: /run/xtables.lock
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni-plugin
hostPath:
path: /opt/cni/bin
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
EOF
4. Deploy it
#apply the network plugin
kubectl apply -f kube-flannel.yml
#watch the image pulls and pod status
kubectl get pods -n kube-flannel
#check the node status
kubectl get nodes
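With the vxlan backend from net-conf.json, each node should now have a flannel.1 interface and routes into the other nodes' pod subnets. A rough check (the exact subnets depend on what flannel assigned to each node):
ip -d link show flannel.1
cat /run/flannel/subnet.env
#FLANNEL_SUBNET shows the /24 allocated to this node out of 10.244.0.0/16
ip route | grep 10.244
#expect routes to the other nodes' pod CIDRs via flannel.1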
5. Notes
1. While deploying flannel I hit "error getting ClusterInformation: connection is unauthorized: Unauthorized". This happened after installing calico, removing it, and then installing flannel again. The fix is to delete the leftover calico configuration files under /etc/cni/net.d/ on every master and node.
2. Make sure step 1 (creating 10-flannel.conflist) was actually done, otherwise the config file cannot be read and flannel errors out.
3. Install calico
Description:
Cluster network plugin.
Installation reference: https://projectcalico.docs.tigera.io/about/about-calico
1. Download calico.yaml
#released versions
https://docs.tigera.io/archive
#download a specific version
#manifest / operator resource file
wget https://raw.githubusercontent.com/projectcalico/calico/master/manifests/calico.yaml --no-check-certificate
#uncomment the following in calico.yaml (around lines 4894-4895):
- name: CALICO_IPV4POOL_CIDR
  value: "10.244.0.0/16"
File location: doc/kubernetes/calicoFile/calico.yaml
2. Adjust and apply the manifest
Check the calico/Kubernetes compatibility matrix: https://projectcalico.docs.tigera.io/archive/v3.20/getting-started/kubernetes/requirements
#apply it
kubectl apply -f calico.yaml
#watch the image pulls and pod status
kubectl get pods -n kube-system
#check the node status
kubectl get nodes
4. Note on CIDRs
With flannel the two CIDRs can technically differ because it relies on iptables; with calico and an ipvs kube-proxy they must be identical.
In a binary installation these CIDRs are defined in the kube-proxy and kube-controller-manager configuration files:
--cluster-cidr= in kube-controller-manager.conf and clusterCIDR in kube-proxy.yaml must match, otherwise Pods will misbehave.
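A quick way to confirm the two values match (paths as used in this document; run the first command on the masters and the second on the nodes):
grep -- '--cluster-cidr' /opt/kubernetes/cfg/kube-controller-manager.conf
grep clusterCIDR /opt/kubernetes/yaml/kube-proxy.yaml
#both should show 10.244.0.0/16, the same value as the flannel Network / CALICO_IPV4POOL_CIDR setting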
8. Deploy CoreDNS
Provides DNS resolution for Pods.
In Kubernetes, CoreDNS is mainly used for service discovery, i.e. how services (applications) locate each other.
Project page: https://github.com/coredns/coredns/tags
https://github.com/kubernetes/kubernetes/blob/master/cluster/addons/dns/coredns/coredns.yaml.base
1. Create coredns.yaml (this manifest uses the coredns/coredns:1.10.1 image)
cat > /opt/kubernetes/systemYaml/coredns.yaml << "EOF"
apiVersion: v1
kind: ServiceAccount
metadata:
name: coredns
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
data:
Corefile: |
.:53 {
errors
health {
lameduck 5s
}
ready
kubernetes cluster.local in-addr.arpa ip6.arpa {
fallthrough in-addr.arpa ip6.arpa
}
prometheus :9153
forward . /etc/resolv.conf {
max_concurrent 1000
}
cache 30
loop
reload
loadbalance
}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: coredns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/name: "CoreDNS"
spec:
# replicas: not specified here:
# 1. Default is 1.
# 2. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
spec:
priorityClassName: system-cluster-critical
serviceAccountName: coredns
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
nodeSelector:
kubernetes.io/os: linux
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: k8s-app
operator: In
values: ["kube-dns"]
topologyKey: kubernetes.io/hostname
containers:
- name: coredns
image: coredns/coredns:1.10.1
imagePullPolicy: IfNotPresent
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
args: [ "-conf", "/etc/coredns/Corefile" ]
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
readOnly: true
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
livenessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /ready
port: 8181
scheme: HTTP
dnsPolicy: Default
volumes:
- name: config-volume
configMap:
name: coredns
items:
- key: Corefile
path: Corefile
---
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "CoreDNS"
spec:
selector:
k8s-app: kube-dns
clusterIP: 10.96.0.2
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP
EOF
2. Apply and verify
kubectl apply -f coredns.yaml
kubectl get pods -A
kubectl get pods -n kube-system
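To confirm service discovery works end to end, resolve the kubernetes Service from a throwaway pod. This is just a sketch; busybox:1.28 is used only because its nslookup behaves well, any image with nslookup will do:
kubectl run dns-test --image=busybox:1.28 --restart=Never --rm -it -- nslookup kubernetes.default.svc.cluster.local
#Server:    10.96.0.2
#Name:      kubernetes.default.svc.cluster.local
#Address 1: 10.96.0.1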
9. Deploy the Web UI (Dashboard)
1. Download the manifest
#go to the working directory for system yaml files
cd /opt/kubernetes/systemYaml
#project pages
https://github.com/kubernetes/dashboard
https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard
#download (pick the release that supports your cluster version; v2.7.0 is used here)
#wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.3/aio/deploy/recommended.yaml --no-check-certificate
wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.7.0/aio/deploy/recommended.yaml --no-check-certificate
#alternatively, apply a supported version directly without modification:
#kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.2/aio/deploy/recommended.yaml
2. Modify the Service
#change the kubernetes-dashboard Service as follows (add type: NodePort and a fixed nodePort)
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 8443
  selector:
    k8s-app: kubernetes-dashboard
3. Apply it
#apply
kubectl apply -f recommended.yaml
#check the result
kubectl get svc -n kubernetes-dashboard
kubectl get pods -n kubernetes-dashboard
#access the UI at
https://<node IP>:<service nodePort>
4. Log in with a token
1. Create the binding and fetch the token
#create a ClusterRoleBinding granting cluster-admin, i.e. full access to view and modify all resources in the cluster
kubectl create clusterrolebinding dashboard-cluster-admin --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:kubernetes-dashboard
#list the secrets
kubectl get secret -n kubernetes-dashboard
#extract the token
kubectl describe secret $(kubectl get secret -n kubernetes-dashboard|grep kubernetes-dashboard-token*|awk '{print $1}') -n kubernetes-dashboard|egrep -w token:
2. Copy the token and use it on the login page, for example:
eyJhbGciOiJSUzI1NiIsImtpZCI6Ik53ZnFsLXZOQmJrZnl3NkE0MGpmczJLZGt6N0JsNlNWb0FuVXFZQm55eVUifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC10b2tlbi1tOWJoeCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjI1Njg0NzJjLTYyOTQtNDdmNS04NWRlLWUyYWYxNTg4NmYwOSIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDprdWJlcm5ldGVzLWRhc2hib2FyZCJ9.pZlDEHlbAYEwmNtL8NKt1hxEwNW5ZvOVb_e1uIaVHs5FI7EDTPezm-_4pi1ITkyoRn6gjILe_XnKRpNLSK39EvRQFO0YWY_huxFnPxOmIG-YNN8Xxy3PTK65O1cMFpzK2BrKTrAd8-5IaNigALmOT7cVzvlK2HBn3bAoL-lVVXjFjFzvJM62P36e7aqpCdZ4pZjyKPN2FWLQtqagd2Omitoqc_kyF_L67wMnf2bPcDu7xGqDnTctHRsBve_LtKyfQUVJTn-wuQxX3YOrfYLXON0qmzpT4SBdYNoAoKMEYmdzT-LnSrjbSm3pHz4IdFEI-KB1zpW7lCHFm6HzpELJ5g
5. Log in with a kubeconfig file
#1. Create the cluster entry
mkdir -vp /opt/kubernetes/webUi
#go to the directory holding the CA files
cd /usr/local/kubernetes/etcd/
#--server is the apiserver address, here the keepalived VIP
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --server="https://192.168.11.100:6443" --embed-certs=true --kubeconfig=/opt/kubernetes/webUi/dashboard-admin.conf
#create the credentials
TOKEN=$(kubectl get secret $(kubectl get secret -n kubernetes-dashboard|grep kubernetes-dashboard-token*|awk '{print $1}') -n kubernetes-dashboard -o jsonpath={.data.token}|base64 -d)
kubectl config set-credentials dashboard-admin --token=$TOKEN --kubeconfig=/opt/kubernetes/webUi/dashboard-admin.conf
#create the context
kubectl config set-context dashboard-admin@kubernetes --cluster=kubernetes --user=dashboard-admin --kubeconfig=/opt/kubernetes/webUi/dashboard-admin.conf
#switch the current-context to dashboard-admin@kubernetes
kubectl config use-context dashboard-admin@kubernetes --kubeconfig=/opt/kubernetes/webUi/dashboard-admin.conf
#finally, go to /opt/kubernetes/webUi and download dashboard-admin.conf to your local machine for the Kubeconfig login
10. Deploy a test nginx application
cat > /opt/kubernetes/yaml/nginx-test.yaml << "EOF"
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
  namespace: default # namespace is optional; remove this line if you don't want to set it
  labels:
    app: nginx
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: nginx:latest
          imagePullPolicy: IfNotPresent
          ports:
            - name: http
              protocol: TCP
              containerPort: 80
          resources:
            limits:
              cpu: "1.0"
              memory: 512Mi
            requests:
              cpu: "0.5"
              memory: 128Mi
---
apiVersion: v1
kind: Service
metadata:
  annotations:
  name: nginx-test-service
  namespace: default # namespace is optional; remove this line if you don't want to set it
spec:
  ports:
    - port: 80
      targetPort: 80
      nodePort: 80
      protocol: TCP
  selector:
    app: nginx
  sessionAffinity: None
  type: NodePort
EOF
#deploy it
kubectl apply -f nginx-test.yaml
#check the pod and the exposed NodePort
kubectl get pod,svc
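Because nodePort: 80 is set explicitly (allowed here since the apiserver was started with --service-node-port-range=1-32767), the test page should be reachable on any node IP, for example:
curl -I http://192.168.11.113:80
#expect an HTTP/1.1 200 OK response from nginx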