
A high-performance, high-availability web cluster based on k8s


Project information

Project architecture diagram

(Architecture diagram: Kubernetes web cluster with load balancing, containers, Ansible, Prometheus, and Grafana)

Project description

Simulate a company's production k8s environment: deploy web, MySQL, NFS, Harbor, Prometheus, Jenkins, and other applications to build a high-performance, high-availability web cluster.

Project environment

CentOS 7, k8s, docker, Prometheus, NFS, JumpServer, Harbor, Ansible, Jenkins, etc.

Project steps

IP plan

k8s-master:192.168.121.101

k8s-node1:192.168.121.102

k8s-node2:192.168.121.103

nfs:192.168.121.104

harbor:192.168.121.105

firewalld:192.168.121.106

jumpserver:192.168.121.107

Prometheus:192.168.121.108

ansible:192.168.121.109

1. Deploy the machines in the k8s cluster: one master and two nodes

Set the hostnames

hostnamectl set-hostname master && bash
hostnamectl set-hostname node1 && bash
hostnamectl set-hostname node2 && bash

Add name resolution entries

vim /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.121.101 master
192.168.121.102 node1
192.168.121.103 node2

Disable firewalld and SELinux

# Stop the firewall
service firewalld stop

# Keep the firewall from starting at boot
systemctl disable firewalld

# Disable SELinux temporarily
setenforce 0

# Disable SELinux permanently
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config

Configure a static IP address

TYPE="Ethernet"
BOOTPROTO="none"
NAME="ens33"
DEVICE="ens33"
ONBOOT="yes"
IPADDR="192.168.121.101"
PREFIX=24
GATEWAY="192.168.121.2"
DNS1=114.114.114.114

Disable the swap partition

For performance reasons, k8s does not allow swap to be enabled by default.

# Disable temporarily
swapoff -a

# Disable permanently: comment out the swap line in /etc/fstab
vim /etc/fstab
#/dev/mapper/centos-swap swap                    swap    defaults        0 0

Tune Linux kernel parameters

# Load the bridge-filter and overlay modules
modprobe br_netfilter
modprobe overlay

# Creates the file if it does not exist; overwrites it if it does
cat << EOF | tee /etc/modules-load.d/k8s.conf 
br_netfilter
overlay
EOF

# Write /etc/sysctl.d/k8s.conf
cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF

# Load the settings from /etc/sysctl.d (sysctl -p alone only reads /etc/sysctl.conf)
sysctl --system
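
A quick check that the modules and parameters actually took effect (both commands should print non-empty results):

# Verify the modules are loaded and the key parameters are active
lsmod | grep -e br_netfilter -e overlay
sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward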

Update and configure package repositories

# Install basic packages
yum install vim wget -y

# Add the Aliyun yum repository
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo

# Rebuild the yum metadata cache
yum clean all && yum makecache

# Configure the Aliyun Docker yum repository
yum install yum-utils -y
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

Enable ipvs support

kube-proxy will use ipvs mode for service load balancing.

# Install the ipset and ipvsadm tools
yum install ipset ipvsadm -y

# Write the required modules into a script so they are reloaded automatically after a node reboot
cat <<EOF > /etc/sysconfig/modules/ipvs.modules
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF

# Make the script executable
chmod +x /etc/sysconfig/modules/ipvs.modules

# Run the script
/bin/bash /etc/sysconfig/modules/ipvs.modules
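
A quick sanity check that the ipvs modules loaded (note: nf_conntrack_ipv4 matches the CentOS 7 3.10 kernel; newer kernels ship nf_conntrack instead):

# Confirm the ipvs modules are present
lsmod | grep -e ip_vs -e nf_conntrack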

Synchronize time

# Start and enable the chronyd service
systemctl start chronyd
systemctl enable chronyd

# Set the time zone
timedatectl set-timezone Asia/Shanghai

Install Docker

yum install docker-ce-20.10.24-3.el7 docker-ce-cli-20.10.24-3.el7 containerd.io -y

# Start Docker and enable it at boot
systemctl enable --now docker
# Verify Docker's status
systemctl status docker

Configure Docker

# Create the config directory
mkdir -p /etc/docker

# Write the configuration
cat > /etc/docker/daemon.json <<EOF
{
  "registry-mirrors": [
    "https://registry.docker-cn.com",
    "http://hub-mirror.c.163.com",
    "https://reg-mirror.qiniu.com",
    "https://docker.mirrors.ustc.edu.cn"
  ],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "data-root": "/opt/lib/docker"
}
EOF

# Reload the configuration
systemctl daemon-reload
# Restart the Docker service
systemctl restart docker
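
It is worth confirming that Docker picked up the systemd cgroup driver and the new data root, since kubelet and Docker must agree on the cgroup driver:

# Should print "Cgroup Driver: systemd" and "Docker Root Dir: /opt/lib/docker"
docker info | grep -i -e "cgroup driver" -e "root dir"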

Configure the k8s cluster environment

# Run on every machine in the k8s cluster
# Configure the component repository
cat <<EOF | tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

# Build the local yum cache
yum makecache
 
# Install the k8s components
yum install -y kubeadm-1.23.17-0 kubelet-1.23.17-0 kubectl-1.23.17-0 --disableexcludes=kubernetes
 
cat <<EOF > /etc/sysconfig/kubelet
KUBELET_CGROUP_ARGS="--cgroup-driver=systemd"
KUBE_PROXY_MODE="ipvs"
EOF
 
 
# Start kubelet and enable it at boot
systemctl enable --now kubelet

Initialize the cluster

Master node initialization
# Run on the master
# apiserver-advertise-address takes the master's IP address
[root@master ~]# kubeadm init \
    --kubernetes-version=v1.23.17 \
    --pod-network-cidr=10.224.0.0/16 \
    --service-cidr=10.96.0.0/12 \
    --apiserver-advertise-address=192.168.121.101 \
    --image-repository=registry.aliyuncs.com/google_containers

# On success, the output includes follow-up instructions; run them:
mkdir -p $HOME/.kube 
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

# The join command printed below will be run on the nodes shortly
kubeadm join 192.168.121.101:6443 --token 8yrrzd.993ke86hqr3aq72w \
	--discovery-token-ca-cert-hash sha256:6efa53848ef3465aeb871304b9614d5b3771c411fe1cbfeb02de974c448d117b
Node initialization
# Run on node1 and node2
# Join the nodes to the cluster
[root@node1 ~]# kubeadm join 192.168.121.101:6443 --token 8yrrzd.993ke86hqr3aq72w \
	--discovery-token-ca-cert-hash sha256:6efa53848ef3465aeb871304b9614d5b3771c411fe1cbfeb02de974c448d117b
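
The token embedded in the join command expires after 24 hours by default; if a node joins later, a fresh join command can be printed on the master:

# Regenerate the join command with a new token
kubeadm token create --print-join-command
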
Check node status and assign roles
# Run on the master
[root@master ~]# kubectl get nodes
NAME     STATUS     ROLES                  AGE     VERSION
master   NotReady   control-plane,master   15m     v1.23.17
node1    NotReady   <none>                 6m31s   v1.23.17
node2    NotReady   <none>                 6m32s   v1.23.17

# Assign the worker role (run on the master)
[root@master ~]# kubectl label node node1 node-role.kubernetes.io/worker=worker
[root@master ~]# kubectl label node node2 node-role.kubernetes.io/worker=worker

Install the Calico network plugin

# Run on the master
[root@master ~]# kubectl apply -f https://docs.projectcalico.org/archive/v3.25/manifests/calico.yaml

# Verify that every node's STATUS is Ready
[root@master ~]# kubectl get nodes
NAME     STATUS   ROLES                  AGE   VERSION
master   Ready    control-plane,master   23m   v1.23.17
node1    Ready    worker                 14m   v1.23.17
node2    Ready    worker                 14m   v1.23.17

Configure k8s to use ipvs

# Run on the master
[root@master ~]# kubectl edit configmap kube-proxy -n kube-system
--> mode: "ipvs"
# Delete all kube-proxy pods so they restart in ipvs mode
[root@master ~]# kubectl delete pods -n kube-system -l k8s-app=kube-proxy
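
To confirm the recreated kube-proxy pods really switched to ipvs, inspect the rules with ipvsadm (installed earlier) or check the kube-proxy logs:

# The ipvs rule table should now list the service VIPs
ipvsadm -Ln

# Or look for "Using ipvs Proxier" in the kube-proxy logs
kubectl logs -n kube-system -l k8s-app=kube-proxy | grep -i ipvs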

2. Deploy a firewall server and a bastion host to protect the web cluster

Deploy the firewall server

# The firewall server is configured with two NICs
# ens33: NAT mode
IPADDR=192.168.121.106
PREFIX=24
GATEWAY=192.168.121.2
DNS1=114.114.114.114

# ens34: bridged mode
IPADDR=192.168.2.111
PREFIX=24
GATEWAY=192.168.2.1
DNS1=114.114.114.114

# Write the NAT rules script
[root@firewalld ~]# vim nat.sh
#!/bin/bash

# Enable IP forwarding
echo 1 >/proc/sys/net/ipv4/ip_forward

# Stop firewalld
systemctl stop firewalld
systemctl disable firewalld

# Flush existing rules
iptables -F
iptables -t nat -F

# Enable SNAT
# Masquerade the private 192.168.121.0/24 addresses behind the outward-facing interface
iptables -t nat -A POSTROUTING -s 192.168.121.0/24  -o ens34  -j  MASQUERADE

iptables -t filter -P INPUT ACCEPT

# On the master inside the cluster, open the required ports
[root@master ~]# cat openport.sh 
#!/bin/bash

# Allow ssh
iptables -t filter -A INPUT -p tcp --dport 22 -j ACCEPT

# Allow dns
iptables -t filter -A INPUT -p udp --dport 53 -s 192.168.121.0/24 -j ACCEPT

# Allow dhcp
iptables -t filter -A INPUT -p udp --dport 67 -j ACCEPT

# Allow http and https
iptables -t filter -A INPUT -p tcp --dport 80 -j ACCEPT
iptables -t filter -A INPUT -p tcp --dport 443 -j ACCEPT

# Allow MySQL
iptables -t filter -A INPUT -p tcp --dport 3306 -j ACCEPT

iptables -t filter -P INPUT DROP
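
These iptables rules live only in memory and disappear on reboot. One simple way to persist them, mirroring how rc.local is used for node_exporter later in this project (an assumption, not part of the original setup; the script path is the one shown above), is to replay the script at boot; the same trick works for nat.sh on the firewall server:

# Re-apply the rules at boot
echo "bash /root/openport.sh" >> /etc/rc.d/rc.local
chmod +x /etc/rc.d/rc.local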

Deploy the bastion host

# One command installs JumpServer
[root@jumpserver ~]# curl -sSL https://resource.fit2cloud.com/jumpserver/jumpserver/releases/latest/download/quick_start.sh | bash

# Web access
http://192.168.121.107:80
Default user: admin  Default password: admin

3. Deploy Ansible for automated operations

# Set up passwordless SSH: generate a key pair on the Ansible host
[root@ansible ~]# ssh-keygen -t rsa
# Copy the public key to root's home directory on every server
[root@ansible ~]# ssh-copy-id -i /root/.ssh/id_rsa.pub root@192.168.121.101
[root@ansible ~]# ssh-copy-id -i /root/.ssh/id_rsa.pub root@192.168.121.102
[root@ansible ~]# ssh-copy-id -i /root/.ssh/id_rsa.pub root@192.168.121.103
......

# Write the host inventory
[root@ansible ~]# cd /etc/ansible
[root@ansible ansible]# vim hosts
[master]
192.168.121.101
[node]
192.168.121.102
192.168.121.103
[nfs]
192.168.121.104
[harbor]
192.168.121.105
[firewalld]
192.168.121.106
[jumpserver]
192.168.121.107
[Prometheus]
192.168.121.108
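
With the inventory in place, a quick ad-hoc run confirms that Ansible can reach every host over the passwordless channel:

# Test connectivity to every host in the inventory
ansible all -m ping

# Example ad-hoc task: check root filesystem usage on the k8s nodes
ansible node -m shell -a "df -h /"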

4. Deploy the NFS server and provide storage for the web cluster through PVs, PVCs, and volume mounts

# Stop the firewall
[root@nfs ~]# service firewalld stop
[root@nfs ~]# systemctl  disable  firewalld
 
# Disable SELinux
[root@nfs ~]# setenforce 0
[root@nfs ~]# sed -i '/^SELINUX=/ s/enforcing/disabled/'  /etc/selinux/config

Install NFS on the NFS server and on every machine in the web cluster

[root@nfs ~]# yum install nfs-utils -y

Configure the NFS share

# Run on the NFS server
[root@nfs ~]# vim /etc/exports
/web   192.168.121.0/24(rw,no_root_squash,sync)

# Create the shared directory
[root@nfs ~]# mkdir /web
[root@nfs ~]# cd /web
[root@nfs web]# echo "welcome to china" >index.html
# Re-export the shares
[root@nfs web]# exportfs -rv
exporting 192.168.121.0/24:/web

# Restart the NFS service and enable it at boot
[root@nfs web]# systemctl restart nfs && systemctl enable nfs
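
Before wiring the share into k8s, it can be verified from any cluster machine (nfs-utils is already installed there):

# List the exports published by the NFS server
showmount -e 192.168.121.104

# Optionally mount it by hand, read the page, then clean up
mount -t nfs 192.168.121.104:/web /mnt && cat /mnt/index.html && umount /mnt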

Create a PV backed by the NFS share

# Run on the master
[root@master pv]# mkdir /pv
[root@master pv]# cd /pv/
[root@master pv]# vim nfs-pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-web
  labels:
    type: pv-web
spec:
  capacity:
    storage: 10Gi 
  accessModes:
    - ReadWriteMany
  storageClassName: nfs
  nfs:
    path: "/web"
    server: 192.168.121.104
    readOnly: false
    
[root@master pv]# kubectl apply -f nfs-pv.yaml
persistentvolume/pv-web created

[root@master pv]# kubectl get pv
NAME     CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS   REASON   AGE
pv-web   10Gi       RWX            Retain           Available           nfs                     2m

# Create the PVC
[root@master pv]# vim nfs-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-web
spec:
  accessModes:
  - ReadWriteMany      
  resources:
     requests:
       storage: 1Gi
  storageClassName: nfs
 
[root@master pv]# kubectl apply -f nfs-pvc.yaml
persistentvolumeclaim/pvc-web created

[root@master pv]# kubectl get pvc
NAME      STATUS   VOLUME   CAPACITY   ACCESS MODES   STORAGECLASS   AGE
pvc-web   Bound    pv-web   10Gi       RWX            nfs            11s

# Create the pods
[root@master pv]# vim nginx-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      volumes:
        - name: pv-storage-nfs
          persistentVolumeClaim:
            claimName: pvc-web
      containers:
        - name: pv-container-nfs
          image: nginx
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 80
              name: "http-server"
          volumeMounts:
            - mountPath: "/usr/share/nginx/html"
              name: pv-storage-nfs

[root@master pv]# kubectl apply -f nginx-deployment.yaml
deployment.apps/nginx-deployment created

[root@master pv]# kubectl get pod -o wide
NAME                               READY   STATUS    RESTARTS   AGE   IP               NODE    NOMINATED NODE   READINESS GATES
nginx-deployment-5ffbcf544-kqq5p   1/1     Running   0          35s   10.224.166.131   node1   <none>           <none>
nginx-deployment-5ffbcf544-sgfxt   1/1     Running   0          35s   10.224.104.3     node2   <none>           <none>
nginx-deployment-5ffbcf544-x27kd   1/1     Running   0          35s   10.224.166.130   node1   <none>           <none>

# Test
[root@master pv]# curl 10.224.166.131
welcome to china

# Modify index.html on the NFS server
[root@nfs web]# vim index.html
welcome to china
hello world!

[root@master pv]# curl 10.224.166.131
welcome to china
hello world!

5. Use HPA to run the nginx and MySQL pods, scaling horizontally when CPU usage passes 80%

Deploy the MySQL pod

# Write the yaml file
[root@master ~]# mkdir /mysql
[root@master ~]# cd /mysql/
[root@master mysql]# vim mysql.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
    labels:
        app: mysql
    name: mysql
spec:
    replicas: 1
    selector:
        matchLabels:
            app: mysql
    template:
        metadata:
            labels: 
                app: mysql
        spec:
            containers:
            - image: mysql:latest
              name: mysql
              imagePullPolicy: IfNotPresent
              env:
              - name: MYSQL_ROOT_PASSWORD
                value: "123456"
              ports:
              - containerPort: 3306
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: svc-mysql
  name: svc-mysql
spec:
  selector:
    app: mysql
  type: NodePort
  ports:
  - port: 3306
    protocol: TCP
    targetPort: 3306
    nodePort: 30007

# Deploy the pod
[root@master mysql]# kubectl apply -f mysql.yaml 

[root@master mysql]# kubectl get service
NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)          AGE
kubernetes   ClusterIP   10.96.0.1       <none>        443/TCP          22h
svc-mysql    NodePort    10.101.185.14   <none>        3306:30007/TCP   70s

[root@master mysql]# kubectl get pods
NAME                               READY   STATUS    RESTARTS   AGE
mysql-597ff9595d-vkx7t             1/1     Running   0          50s
nginx-deployment-5ffbcf544-kqq5p   1/1     Running   0          26m
nginx-deployment-5ffbcf544-sgfxt   1/1     Running   0          26m
nginx-deployment-5ffbcf544-x27kd   1/1     Running   0          26m

# Enter MySQL
[root@master mysql]# kubectl exec -it mysql-597ff9595d-vkx7t -- bash
bash-4.4# mysql -uroot -p123456
mysql: [Warning] Using a password on the command line interface can be insecure.
Welcome to the MySQL monitor.  Commands end with ; or \g.
Your MySQL connection id is 8
Server version: 8.3.0 MySQL Community Server - GPL

Copyright (c) 2000, 2024, Oracle and/or its affiliates.

Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

mysql> 
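
Because the service is a NodePort, the database is also reachable from outside the cluster, for example from the NFS server (assuming a MySQL client is installed there and the node port is reachable through the firewall):

# Connect through any node's IP on the NodePort
mysql -h 192.168.121.101 -P 30007 -uroot -p123456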

Deploy nginx with HPA enabled

# Write the yaml file
[root@master ~]# mkdir /hpa
[root@master ~]# cd /hpa
[root@master hpa]# vim nginx-hpa.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myweb
spec:
  selector:
    matchLabels:
      run: myweb
  template:
    metadata:
      labels:
        run: myweb
    spec:
      containers:
      - name: myweb
        image: nginx
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
        resources:
          limits:
            cpu: 200m
          requests:
            cpu: 50m
---
apiVersion: v1
kind: Service
metadata:
  name: myweb-svc
  labels:
    run: myweb-svc
spec:
  type: NodePort
  ports:
  - port: 80
    targetPort: 80
    nodePort: 31000
  selector:
    run: myweb

[root@master hpa]# kubectl apply -f nginx-hpa.yaml
deployment.apps/myweb created
service/myweb-svc created

# Enable HPA horizontal scaling
[root@master hpa]# kubectl autoscale deployment myweb --cpu-percent=80 --min=1 --max=10
horizontalpodautoscaler.autoscaling/myweb autoscaled
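
kubectl autoscale creates an HPA object behind the scenes; for reference, a sketch of the equivalent manifest (autoscaling/v2 is available on k8s 1.23, and the HPA only acts once metrics-server, installed below, is running):

apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: myweb
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: myweb
  minReplicas: 1
  maxReplicas: 10
  metrics:
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 80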

Install metrics-server

# Run on the master
# Download the components.yaml manifest
wget https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml

# Edit the components.yaml manifest
image: registry.aliyuncs.com/google_containers/metrics-server:v0.6.0
imagePullPolicy: IfNotPresent
args:
# Add the two arguments below
- --kubelet-insecure-tls
- --kubelet-preferred-address-types=InternalDNS,InternalIP,ExternalDNS,ExternalIP,Hostname

[root@master ~]# cat components.yaml
The relevant part of the file:
spec:
      containers:
      - args:
        - --kubelet-insecure-tls
        - --cert-dir=/tmp
        - --secure-port=10250
        - --kubelet-preferred-address-types=InternalDNS,InternalIP,ExternalDNS,ExternalIP,Hostname
        - --kubelet-use-node-status-port
        - --metric-resolution=15s
        image: registry.aliyuncs.com/google_containers/metrics-server:v0.6.0
        imagePullPolicy: IfNotPresent
        

# Deploy
[root@master ~]# kubectl apply -f components.yaml

# Check that the install succeeded
[root@master ~]# kubectl get pods -o wide -n kube-system |grep metrics-server
metrics-server-5bd756b4b8-788qj            1/1     Running   0              51s   10.224.104.10     node2    <none>           <none>

[root@master ~]# kubectl top node
NAME     CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%   
master   163m         8%     1183Mi          32%       
node1    88m          4%     1186Mi          32%       
node2    76m          3%     656Mi           17%

Run an ab stress test from the NFS server

# Install httpd-tools
[root@nfs ~]# yum install httpd-tools -y

# Simulate user traffic
[root@nfs ~]# ab  -n 1000  -c50  http://192.168.121.101:31000/index.html

# Watch the HPA status on the master
[root@master ~]# kubectl get hpa --watch

# Increase the concurrency and number of requests
[root@nfs ~]# ab  -n 5000  -c100  http://192.168.121.101:31000/index.html
[root@nfs ~]# ab  -n 10000  -c200  http://192.168.121.101:31000/index.html
[root@nfs ~]# ab  -n 20000  -c400  http://192.168.121.101:31000/index.html

6. Build the CI/CD environment: install and deploy Jenkins and Harbor for code releases

Install and deploy Jenkins

# Install git
[root@master ~]# mkdir /jenkins
[root@master ~]# cd /jenkins
[root@master jenkins]# yum install git -y

# Clone the yaml manifests
[root@master jenkins]# git clone https://github.com/scriptcamp/kubernetes-jenkins

# Create a namespace
[root@master jenkins]# cd kubernetes-jenkins/
[root@master kubernetes-jenkins]# cat namespace.yaml 
apiVersion: v1
kind: Namespace
metadata:
  name: devops-tools

[root@master kubernetes-jenkins]# kubectl apply -f namespace.yaml 
namespace/devops-tools created

# List the namespaces
[root@master kubernetes-jenkins]# kubectl get ns
NAME              STATUS   AGE
default           Active   39h
devops-tools      Active   89s
kube-node-lease   Active   39h
kube-public       Active   39h
kube-system       Active   39h

# Create the service account and cluster role binding
[root@master kubernetes-jenkins]# vim serviceAccount.yaml 
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: jenkins-admin
rules:
  - apiGroups: [""]
    resources: ["*"]
    verbs: ["*"]

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: jenkins-admin
  namespace: devops-tools

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: jenkins-admin
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: jenkins-admin
subjects:
- kind: ServiceAccount
  name: jenkins-admin
  namespace: devops-tools

[root@master kubernetes-jenkins]# kubectl apply -f serviceAccount.yaml 
clusterrole.rbac.authorization.k8s.io/jenkins-admin unchanged
serviceaccount/jenkins-admin unchanged
clusterrolebinding.rbac.authorization.k8s.io/jenkins-admin created

# Create the volume
[root@master kubernetes-jenkins]# vim  volume.yaml

kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: local-storage
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: jenkins-pv-volume
  labels:
    type: local
spec:
  storageClassName: local-storage
  claimRef:
    name: jenkins-pv-claim
    namespace: devops-tools
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  local:
    path: /mnt
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - node1  # change this to a node name in your k8s cluster

---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: jenkins-pv-claim
  namespace: devops-tools
spec:
  storageClassName: local-storage
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 3Gi

[root@master kubernetes-jenkins]# kubectl apply -f volume.yaml 
storageclass.storage.k8s.io/local-storage created
persistentvolume/jenkins-pv-volume created
persistentvolumeclaim/jenkins-pv-claim created

[root@master kubernetes-jenkins]# kubectl get pv
NAME                CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                           STORAGECLASS    REASON   AGE
jenkins-pv-volume   10Gi       RWO            Retain           Bound    devops-tools/jenkins-pv-claim   local-storage            22s
pv-web              10Gi       RWX            Retain           Bound    default/pvc-web                 nfs                      17h

[root@master kubernetes-jenkins]# kubectl describe pv jenkins-pv-volume
Name:              jenkins-pv-volume
Labels:            type=local
Annotations:       <none>
Finalizers:        [kubernetes.io/pv-protection]
StorageClass:      local-storage
Status:            Bound
Claim:             devops-tools/jenkins-pv-claim
Reclaim Policy:    Retain
Access Modes:      RWO
VolumeMode:        Filesystem
Capacity:          10Gi
Node Affinity:     
  Required Terms:  
    Term 0:        kubernetes.io/hostname in [node1]
Message:           
Source:
    Type:  LocalVolume (a persistent volume backed by local storage on a node)
    Path:  /mnt
Events:    <none>

# Deploy Jenkins
[root@master kubernetes-jenkins]# cat deployment.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: jenkins
  namespace: devops-tools
spec:
  replicas: 1
  selector:
    matchLabels:
      app: jenkins-server
  template:
    metadata:
      labels:
        app: jenkins-server
    spec:
      securityContext:
            fsGroup: 1000 
            runAsUser: 1000
      serviceAccountName: jenkins-admin
      containers:
        - name: jenkins
          image: jenkins/jenkins:lts
          imagePullPolicy: IfNotPresent
          resources:
            limits:
              memory: "2Gi"
              cpu: "1000m"
            requests:
              memory: "500Mi"
              cpu: "500m"
          ports:
            - name: httpport
              containerPort: 8080
            - name: jnlpport
              containerPort: 50000
          livenessProbe:
            httpGet:
              path: "/login"
              port: 8080
            initialDelaySeconds: 90
            periodSeconds: 10
            timeoutSeconds: 5
            failureThreshold: 5
          readinessProbe:
            httpGet:
              path: "/login"
              port: 8080
            initialDelaySeconds: 60
            periodSeconds: 10
            timeoutSeconds: 5
            failureThreshold: 3
          volumeMounts:
            - name: jenkins-data
              mountPath: /var/jenkins_home         
      volumes:
        - name: jenkins-data
          persistentVolumeClaim:
              claimName: jenkins-pv-claim
              
[root@master kubernetes-jenkins]# kubectl apply -f deployment.yaml 
deployment.apps/jenkins created

[root@master kubernetes-jenkins]# kubectl get deploy -n devops-tools
NAME      READY   UP-TO-DATE   AVAILABLE   AGE
jenkins   1/1     1            1           7m

[root@master kubernetes-jenkins]# kubectl get pod -n devops-tools
NAME                      READY   STATUS    RESTARTS   AGE
jenkins-b96f7764f-2gzrk   1/1     Running   0          6m7s

# Expose the Jenkins pod
[root@master kubernetes-jenkins]# cat service.yaml 
apiVersion: v1
kind: Service
metadata:
  name: jenkins-service
  namespace: devops-tools
  annotations:
      prometheus.io/scrape: 'true'
      prometheus.io/path:   /
      prometheus.io/port:   '8080'
spec:
  selector: 
    app: jenkins-server
  type: NodePort  
  ports:
    - port: 8080
      targetPort: 8080
      nodePort: 32000

[root@master kubernetes-jenkins]# kubectl apply -f service.yaml
service/jenkins-service created

[root@master kubernetes-jenkins]# kubectl get svc -n devops-tools
NAME              TYPE       CLUSTER-IP      EXTERNAL-IP   PORT(S)          AGE
jenkins-service   NodePort   10.98.133.177   <none>        8080:32000/TCP   8s

# Browse to a cluster node's IP plus the NodePort
http://192.168.121.101:32000

# Exec into the pod to fetch the initial admin password
[root@master kubernetes-jenkins]# kubectl exec -it jenkins-b96f7764f-2gzrk -n devops-tools -- bash
jenkins@jenkins-b96f7764f-2gzrk:/$ cat /var/jenkins_home/secrets/initialAdminPassword
af75ec6ce21d47f2b111f0e60b69ebb9

Install and deploy Harbor

# Prepare a VM with 2 cores and 4 GB of RAM
# Configure the Aliyun repositories
[root@harbor ~]# yum install yum-utils -y
[root@harbor ~]# yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

# Install docker
[root@harbor ~]# yum install docker-ce-20.10.6 -y

# Start docker and enable it at boot
[root@harbor ~]# systemctl start docker
[root@harbor ~]# systemctl enable docker.service

# Check the docker and docker compose versions
[root@harbor ~]# docker version
[root@harbor ~]# docker compose version

# Download harbor
[root@harbor ~]# wget https://github.com/goharbor/harbor/releases/download/v2.8.3/harbor-offline-installer-v2.8.3.tgz

# Extract it
[root@harbor ~]# ls
anaconda-ks.cfg  harbor-offline-installer-v2.8.3.tgz
[root@harbor ~]# tar xf harbor-offline-installer-v2.8.3.tgz

# Edit the configuration file
[root@harbor ~]# ls
anaconda-ks.cfg  harbor  harbor-offline-installer-v2.8.3.tgz

[root@harbor ~]# cd harbor
[root@harbor harbor]# ls
common.sh             harbor.yml.tmpl  LICENSE
harbor.v2.8.3.tar.gz  install.sh       prepare

[root@harbor harbor]# vim harbor.yml.tmpl
# The IP address or hostname to access admin UI and registry service.
# DO NOT use localhost or 127.0.0.1, because Harbor needs to be accessed by external clients.
hostname: 192.168.121.105  # change to this host's IP address
 
# http related config
http:
  # port for http, default is 80. If https enabled, this port will redirect to https port
  port: 5001  # the port can be changed
# Leave the rest unchanged

# Install harbor
[root@harbor harbor]# mv harbor.yml.tmpl  harbor.yml
[root@harbor harbor]# ./install.sh
[+] Running 9/10
 ✔ Network harbor_harbor        Created                                                                     2.8s 
 ✔ Container harbor-log         Started                                                                     0.5s 
 ✔ Container registry           Started                                                                     1.5s 
 ✔ Container harbor-db          Started                                                                     1.2s 
 ✔ Container harbor-portal      Started                                                                     1.6s 
 ✔ Container redis              Started                                                                     1.5s 
 ✔ Container registryctl        Started                                                                     1.2s 
 ✔ Container harbor-core        Started                                                                     1.9s 
 ✔ Container harbor-jobservice  Started                                                                     2.4s 
 ✔ Container nginx              Started                                                                     2.5s 
✔ ----Harbor has been installed and started successfully.----

[root@harbor harbor]# docker compose ps|grep harbor
WARN[0000] /root/harbor/docker-compose.yml: `version` is obsolete 
harbor-core         goharbor/harbor-core:v2.8.3          "/harbor/entrypoint.…"   core          2 minutes ago   Up 2 minutes (healthy)   
harbor-db           goharbor/harbor-db:v2.8.3            "/docker-entrypoint.…"   postgresql    2 minutes ago   Up 2 minutes (healthy)   
harbor-jobservice   goharbor/harbor-jobservice:v2.8.3    "/harbor/entrypoint.…"   jobservice    2 minutes ago   Up 2 minutes (healthy)   
harbor-log          goharbor/harbor-log:v2.8.3           "/bin/sh -c /usr/loc…"   log           2 minutes ago   Up 2 minutes (healthy)   127.0.0.1:1514->10514/tcp
harbor-portal       goharbor/harbor-portal:v2.8.3        "nginx -g 'daemon of…"   portal        2 minutes ago   Up 2 minutes (healthy)   
nginx               goharbor/nginx-photon:v2.8.3         "nginx -g 'daemon of…"   proxy         2 minutes ago   Up 2 minutes (healthy)   0.0.0.0:5001->8080/tcp, :::5001->8080/tcp
redis               goharbor/redis-photon:v2.8.3         "redis-server /etc/r…"   redis         2 minutes ago   Up 2 minutes (healthy)   
registry            goharbor/registry-photon:v2.8.3      "/home/harbor/entryp…"   registry      2 minutes ago   Up 2 minutes (healthy)   
registryctl         goharbor/harbor-registryctl:v2.8.3   "/home/harbor/start.…"   registryctl   2 minutes ago   Up 2 minutes (healthy)   

# Verify in a browser
http://192.168.121.105:5001/
Account: admin
Password: Harbor12345
Create a project: k8s-harbor
Create a user: user
Password: Aa12345678
Grant the user access to the k8s-harbor project with the project-admin role

# Point the k8s cluster at the harbor registry
[root@master ~]# vim /etc/docker/daemon.json 
{
  "registry-mirrors": [
    "https://registry.docker-cn.com",
    "http://hub-mirror.c.163.com",
    "https://reg-mirror.qiniu.com",
    "https://docker.mirrors.ustc.edu.cn"
  ],
  "insecure-registries":["192.168.121.105:5001"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "data-root": "/opt/lib/docker"
}

Test pushing and pulling images with Harbor

# On the host where harbor was installed, bring the harbor containers back up
[root@harbor ~]# cd /root/harbor
[root@harbor harbor]# docker compose up -d

# Log in to the harbor registry from the master
[root@master ~]# docker login 192.168.121.105:5001
Username: user
Password: 
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store

Login Succeeded

# Push the nginx image on the master to the registry
[root@master ~]# docker tag nginx:latest  192.168.121.105:5001/k8s-harbor/nginx:latest
[root@master ~]# docker images
REPOSITORY                                                        TAG        IMAGE ID       CREATED         SIZE
192.168.121.105:5001/k8s-harbor/nginx                             latest     92b11f67642b   7 weeks ago     187MB

[root@master ~]# docker push 192.168.121.105:5001/k8s-harbor/nginx:latest

# Pull the nginx image from the harbor registry onto the NFS server
[root@nfs ~]# vim /etc/docker/daemon.json 
{
  "registry-mirrors": ["https://ruk1gp3w.mirror.aliyuncs.com"],
  "insecure-registries" : ["192.168.121.105:5001"] 
}

# Restart docker
[root@nfs ~]# systemctl daemon-reload
[root@nfs ~]# systemctl restart docker

# Log in to the harbor registry
[root@nfs ~]# docker login 192.168.121.105:5001
[root@nfs ~]# docker pull  192.168.121.105:5001/k8s-harbor/nginx:latest
[root@nfs ~]# docker images|grep nginx
192.168.121.105:5001/k8s-harbor/nginx                             latest     92b11f67642b   7 weeks ago     187MB
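
Once the nodes' daemon.json also lists the registry under insecure-registries (and Docker has been restarted), the cluster can deploy straight from Harbor. A minimal sketch (the deployment name is illustrative; a private project would additionally need an imagePullSecret):

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-from-harbor
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx-from-harbor
  template:
    metadata:
      labels:
        app: nginx-from-harbor
    spec:
      containers:
      - name: nginx
        image: 192.168.121.105:5001/k8s-harbor/nginx:latest
        ports:
        - containerPort: 80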

7. Use Ingress for domain-based load balancing of the web service

Install the ingress controller

[root@master ~]# mkdir /ingress
[root@master ~]# cd /ingress/
[root@master ingress]# ls
ingress-controller-deploy.yaml         kube-webhook-certgen-v1.1.0.tar.gz
ingress-nginx-controllerv1.1.0.tar.gz  nginx-svc-1.yaml
ingress.yaml                           nginx-svc-2.yaml

# Copy the images to every node server
[root@master ingress]# scp kube-webhook-certgen-v1.1.0.tar.gz node2:/root
[root@master ingress]# scp kube-webhook-certgen-v1.1.0.tar.gz node1:/root
[root@master ingress]# scp ingress-nginx-controllerv1.1.0.tar.gz node1:/root
[root@master ingress]# scp ingress-nginx-controllerv1.1.0.tar.gz node2:/root

# Load the images on node1 and node2
[root@node1 ~]# docker load -i ingress-nginx-controllerv1.1.0.tar.gz 
[root@node1 ~]# docker load -i kube-webhook-certgen-v1.1.0.tar.gz
[root@node1 ~]# docker images
REPOSITORY                                                                     TAG        IMAGE ID       CREATED         SIZE
registry.cn-hangzhou.aliyuncs.com/google_containers/nginx-ingress-controller   v1.1.0     ae1a7201ec95   2 years ago     285MB
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-webhook-certgen       v1.1.1     c41e9fcadf5a   2 years ago     47.7MB

# Start the ingress controller with ingress-controller-deploy.yaml
[root@master ingress]# kubectl apply -f ingress-controller-deploy.yaml 
namespace/ingress-nginx created
serviceaccount/ingress-nginx created
configmap/ingress-nginx-controller created
clusterrole.rbac.authorization.k8s.io/ingress-nginx created
clusterrolebinding.rbac.authorization.k8s.io/ingress-nginx created
role.rbac.authorization.k8s.io/ingress-nginx created
rolebinding.rbac.authorization.k8s.io/ingress-nginx created
service/ingress-nginx-controller-admission created
service/ingress-nginx-controller created
deployment.apps/ingress-nginx-controller created
ingressclass.networking.k8s.io/nginx created
validatingwebhookconfiguration.admissionregistration.k8s.io/ingress-nginx-admission created
serviceaccount/ingress-nginx-admission created
clusterrole.rbac.authorization.k8s.io/ingress-nginx-admission created
clusterrolebinding.rbac.authorization.k8s.io/ingress-nginx-admission created
role.rbac.authorization.k8s.io/ingress-nginx-admission created
rolebinding.rbac.authorization.k8s.io/ingress-nginx-admission created
job.batch/ingress-nginx-admission-create created
job.batch/ingress-nginx-admission-patch created

# List the namespaces
[root@master ingress]# kubectl get namespace
NAME              STATUS   AGE
default           Active   44h
devops-tools      Active   5h30m
ingress-nginx     Active   14s
kube-node-lease   Active   44h
kube-public       Active   44h
kube-system       Active   44h

# Check the related services
[root@master ingress]# kubectl get svc -n ingress-nginx
NAME                                 TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                      AGE
ingress-nginx-controller             NodePort    10.101.156.215   <none>        80:32023/TCP,443:30325/TCP   2m47s
ingress-nginx-controller-admission   ClusterIP   10.105.220.120   <none>        443/TCP                      2m47s


# Check the related pods
[root@master ingress]# kubectl get pod -n ingress-nginx
NAME                                        READY   STATUS      RESTARTS   AGE
ingress-nginx-admission-create-cpd95        0/1     Completed   0          3m28s
ingress-nginx-admission-patch-jdk4w         0/1     Completed   1          3m28s
ingress-nginx-controller-7cd558c647-2d878   1/1     Running     0          3m28s
ingress-nginx-controller-7cd558c647-ct69k   1/1     Running     0          3m28s

Create the pods and the services that expose them

[root@master ingress]# vim nginx-svc-1.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deploy-1
  labels:
    app: nginx-1
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx-1
  template:
    metadata:
      labels:
        app: nginx-1
    spec:
      containers:
      - name: nginx-1
        image: nginx
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name:  nginx-svc-1
  labels:
    app: nginx-svc-1
spec:
  selector:
    app: nginx-1
  ports:
  - name: name-of-service-port
    protocol: TCP
    port: 80
    targetPort: 80

[root@master ingress]# vim nginx-svc-2.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deploy-2
  labels:
    app: nginx-2
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx-2
  template:
    metadata:
      labels:
        app: nginx-2
    spec:
      containers:
      - name: nginx-2
        image: nginx
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name:  nginx-svc-2
  labels:
    app: nginx-svc-2
spec:
  selector:
    app: nginx-2
  ports:
  - name: name-of-service-port
    protocol: TCP
    port: 80
    targetPort: 80

[root@master ingress]# kubectl apply -f nginx-svc-1.yaml
deployment.apps/nginx-deploy-1 created
service/nginx-svc-1 created
[root@master ingress]# kubectl apply -f nginx-svc-2.yaml
deployment.apps/nginx-deploy-2 created
service/nginx-svc-2 created

Enable Ingress to tie the ingress controller to the services

[root@master ingress]# vim ingress-url.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: simple-fanout-example
  annotations:
    kubernetes.io/ingress.class: nginx
spec:
  ingressClassName: nginx
  rules:
  - host: www.qqx.com
    http:
      paths:
      - path: /foo
        pathType: Prefix
        backend:
          service:
            name: nginx-svc-1
            port:
              number: 80
      - path: /bar
        pathType: Prefix
        backend:
          service:
            name: nginx-svc-2
            port:
              number: 80

[root@master ingress]# kubectl apply -f ingress-url.yaml 
ingress.networking.k8s.io/simple-fanout-example created

[root@master ingress]# kubectl get ingress
NAME                    CLASS   HOSTS         ADDRESS                           PORTS   AGE
simple-fanout-example   nginx   www.qqx.com   192.168.121.102,192.168.121.103   80      44m

Test from the NFS server; name-resolution records must be added to its /etc/hosts first

[root@nfs ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.121.102 www.qqx.com
192.168.121.103 www.qqx.com 

# Exec into one of the pods behind nginx-svc-1
[root@master ingress]# kubectl exec -it nginx-deploy-1-75d4755db9-bfkd2 -- bash

# Create a foo directory and an index.html page under the web root
root@nginx-deploy-1-75d4755db9-bfkd2:/# cd /usr/share/nginx/html/
root@nginx-deploy-1-75d4755db9-bfkd2:/usr/share/nginx/html# mkdir foo
root@nginx-deploy-1-75d4755db9-bfkd2:/usr/share/nginx/html# cd foo/
root@nginx-deploy-1-75d4755db9-bfkd2:/usr/share/nginx/html/foo# echo "this is www.qqx.com/foo/" >index.html
root@nginx-deploy-1-75d4755db9-bfkd2:/usr/share/nginx/html/foo# exit

# Likewise, exec into a pod behind nginx-svc-2 and create a bar directory with its own index.html
[root@master ingress]# kubectl exec -it nginx-deploy-2-5c47798b5f-bnpxj -- bash
root@nginx-deploy-2-5c47798b5f-bnpxj:/# cd /usr/share/nginx/html/
root@nginx-deploy-2-5c47798b5f-bnpxj:/usr/share/nginx/html# mkdir bar
root@nginx-deploy-2-5c47798b5f-bnpxj:/usr/share/nginx/html# cd bar/
root@nginx-deploy-2-5c47798b5f-bnpxj:/usr/share/nginx/html/bar# echo "this is www.qqx.com/bar/" >index.html
root@nginx-deploy-2-5c47798b5f-bnpxj:/usr/share/nginx/html/bar# exit

# On the NFS server
[root@nfs ~]# curl www.qqx.com/foo/index.html
this is www.qqx.com/foo/
[root@nfs ~]# curl www.qqx.com/bar/index.html
this is www.qqx.com/bar/

8. Use probes to watch the web pods and restart them on failure, improving reliability

[root@master /]# mkdir /probe
[root@master /]# cd /probe/

# The name myweb2 distinguishes these objects from the myweb deployment in step 5
[root@master probe]# vim myweb2.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: myweb2
  name: myweb2
spec:
  replicas: 3
  selector:
    matchLabels:
      app: myweb2
  template:
    metadata:
      labels:
        app: myweb2
    spec:
      containers:
      - name: myweb2
        image: nginx
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80  # the stock nginx image listens on 80
        resources:
          limits:
            cpu: 300m
          requests:
            cpu: 100m
        livenessProbe:
          exec:
            command:
            - ls
            - /tmp
          initialDelaySeconds: 5
          periodSeconds: 5
        readinessProbe:
          exec:
            command:
            - ls
            - /tmp
          initialDelaySeconds: 5
          periodSeconds: 5   
        startupProbe:
          httpGet:
            path: /
            port: 80
          failureThreshold: 30
          periodSeconds: 10
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: myweb2-svc
  name: myweb2-svc
spec:
  selector:
    app: myweb2
  type: NodePort
  ports:
  - port: 8000
    protocol: TCP
    targetPort: 80
    nodePort: 30001

[root@master probe]# kubectl apply -f myweb2.yaml 
deployment.apps/myweb2 unchanged
service/myweb2-svc created

[root@master probe]# kubectl get pod |grep -i  myweb2
myweb2-7c4dcb8459-7vn8d            1/1     Running     2 (84s ago)   11m
myweb2-7c4dcb8459-jxdpf            1/1     Running     2 (84s ago)   11m
myweb2-7c4dcb8459-zc9n7            1/1     Running     2 (84s ago)   11m

[root@master probe]# kubectl describe pod myweb2-7c4dcb8459-zc9n7
Liveness:     exec [ls /tmp] delay=5s timeout=1s period=5s #success=1 #failure=3
Readiness:    exec [ls /tmp] delay=5s timeout=1s period=5s #success=1 #failure=3
Startup:      http-get http://:80/ delay=0s timeout=1s period=10s #success=1 #failure=30

9. Use the Dashboard to oversee the whole web cluster's resources

[root@master ~]# mkdir /dashboard
[root@master ~]# cd /dashboard
[root@master dashboard]# vim recommended.yaml 
---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  # Specify the service type
  type: NodePort
  ports:
    - port: 443
      targetPort: 8443
      # Specify the port on the host
      nodePort: 30088
  selector:
    k8s-app: kubernetes-dashboard
---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: serviceaccount-cluster-admin
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: User
    apiGroup: rbac.authorization.k8s.io
    name: system:serviceaccount:kubernetes-dashboard:kubernetes-dashboard

---

[root@master dashboard]# kubectl apply -f recommended.yaml 
namespace/kubernetes-dashboard created
serviceaccount/kubernetes-dashboard created
service/kubernetes-dashboard created
secret/kubernetes-dashboard-certs created
secret/kubernetes-dashboard-csrf created
secret/kubernetes-dashboard-key-holder created
configmap/kubernetes-dashboard-settings created
role.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
deployment.apps/kubernetes-dashboard created
service/dashboard-metrics-scraper created
deployment.apps/dashboard-metrics-scraper created

# List the namespaces
[root@master dashboard]# kubectl get ns
NAME                   STATUS   AGE
default                Active   2d13h
devops-tools           Active   22h
ingress-nginx          Active   16h
kube-node-lease        Active   2d13h
kube-public            Active   2d13h
kube-system            Active   2d13h
kubernetes-dashboard   Active   20m

# Check that the pods are up
[root@master dashboard]# kubectl get pods -n kubernetes-dashboard
NAME                                         READY   STATUS    RESTARTS   AGE
dashboard-metrics-scraper-799d786dbf-mqrlw   1/1     Running   0          24s
kubernetes-dashboard-546cbc58cd-9s6gk        1/1     Running   0          24s

# Check that the service is up
[root@master dashboard]# kubectl get svc -n kubernetes-dashboard
NAME                        TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)         AGE
dashboard-metrics-scraper   ClusterIP   10.97.45.28      <none>        8000/TCP        32s
kubernetes-dashboard        NodePort    10.111.113.122   <none>        443:30088/TCP   32s

Browse to https://192.168.121.101:30088/

# Get the login token
[root@master dashboard]#  kubectl get secret -n kubernetes-dashboard|grep dashboard-token
kubernetes-dashboard-token-ltt6g   kubernetes.io/service-account-token   3      31m
[root@master dashboard]# kubectl describe secret -n kubernetes-dashboard kubernetes-dashboard-token-ltt6g
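
The token can also be extracted in one line (the random suffix in the secret name will differ per cluster):

# Print just the decoded login token
kubectl get secret -n kubernetes-dashboard kubernetes-dashboard-token-ltt6g \
  -o jsonpath='{.data.token}' | base64 -d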

10. Install and deploy Prometheus + Grafana

Install Prometheus

[root@prometheus ~]# mkdir /prom
[root@prometheus ~]# cd /prom

# Copy the Prometheus tarball into the current directory
[root@prometheus /prom]# ls
prometheus-2.43.0.linux-amd64.tar.gz

# Extract the archive
[root@prometheus prom]# tar xf prometheus-2.43.0.linux-amd64.tar.gz 
[root@prometheus prom]# ls
prometheus-2.43.0.linux-amd64  prometheus-2.43.0.linux-amd64.tar.gz

[root@prometheus prom]# mv prometheus-2.43.0.linux-amd64 prometheus
[root@prometheus prom]# cd prometheus
# Update the PATH variable
[root@prometheus prometheus]# PATH=/prom/prometheus:$PATH
[root@prometheus prometheus]# vim /etc/profile
PATH=/prom/prometheus:$PATH  # append at the end of the file
# Run in the background
[root@prometheus prometheus]# nohup prometheus --config.file=/prom/prometheus/prometheus.yml  &

# Check the Prometheus process
[root@prometheus prometheus]# ps aux|grep prom
root       8197  1.4  2.1 798956 40900 pts/0    Sl   14:56   0:00 prometheus --config.file=/prom/prometheus/prometheus.yml
root       8204  0.0  0.0 112824   972 pts/0    S+   14:56   0:00 grep --color=auto prom

# Check the Prometheus port
[root@prometheus prometheus]# netstat -anplut | grep prom
tcp6       0      0 :::9090                 :::*                    LISTEN      8197/prometheus     
tcp6       0      0 ::1:9090                ::1:41618               ESTABLISHED 8197/prometheus     
tcp6       0      0 ::1:41618               ::1:9090                ESTABLISHED 8197/prometheus

# Stop the firewall
[root@prometheus prometheus]# service firewalld stop
Redirecting to /bin/systemctl stop firewalld.service
[root@prometheus prometheus]# systemctl disable firewalld
Removed symlink /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.

# Write a systemd unit for the Prometheus service
[root@prometheus prometheus]# vim /usr/lib/systemd/system/prometheus.service
[Unit]
Description=prometheus
[Service]
ExecStart=/prom/prometheus/prometheus --config.file=/prom/prometheus/prometheus.yml
ExecReload=/bin/kill -HUP $MAINPID
KillMode=process
Restart=on-failure
[Install]
WantedBy=multi-user.target

# Disable SELinux
[root@prometheus prometheus]# setenforce 0
[root@prometheus prometheus]# sed -i '/^SELINUX=/ s/enforcing/disabled/'  /etc/selinux/config


# Reload the systemd configuration
[root@prometheus prometheus]# systemctl daemon-reload

[root@prometheus prometheus]# ps aux|grep prom
root       8734  0.0  2.6 930284 48580 pts/0    Sl   17:55   0:00 prometheus --config.file=/prom/prometheus/prometheus.yml

[root@prometheus prometheus]# kill -9 8734
[root@prometheus prometheus]# service prometheus restart
# Browse to IP:9090

Install an exporter on each machine to be monitored

# Run on the monitored machine; here we pick the master
[root@master ~]# tar xf node_exporter-1.4.0-rc.0.linux-amd64.tar.gz 
[root@master ~]# mv node_exporter-1.4.0-rc.0.linux-amd64 /node_exporter
[root@master ~]# cd /node_exporter/
[root@master node_exporter]# ls
LICENSE  node_exporter  NOTICE
 
# Update the environment variable
[root@master node_exporter]# PATH=/node_exporter/:$PATH
[root@master node_exporter]# vim /root/.bashrc 
PATH=/node_exporter/:$PATH

# Run in the background
[root@master node_exporter]# nohup node_exporter --web.listen-address 0.0.0.0:8090 &
[1] 4844
[root@master node_exporter]# nohup: ignoring input and appending output to 'nohup.out'
 
# Check the process
[root@master node_exporter]# ps aux |grep node_exporter
root      84412  0.0  0.3 716544 13104 pts/0    Sl   15:51   0:00 node_exporter --web.listen-address 0.0.0.0:8090
root      84846  0.0  0.0 112824   980 pts/0    S+   15:52   0:00 grep --color=auto node_exporter
 
# Stop the firewall
[root@master ~]# service firewalld stop
Redirecting to /bin/systemctl stop firewalld.service
[root@master ~]# systemctl disable firewalld

# Disable SELinux:
setenforce 0
sed -i '/^SELINUX=/ s/enforcing/disabled/'  /etc/selinux/config
 
Browse to ip:8090
 
# Start node_exporter at boot
[root@master ~]# vim /etc/rc.local
nohup /node_exporter/node_exporter --web.listen-address 0.0.0.0:8090 &
[root@master ~]# chmod +x /etc/rc.d/rc.local
 
Add the machine to /prom/prometheus/prometheus.yml on the Prometheus host
 
 
  - job_name: "prometheus"
    static_configs:
      - targets: ["localhost:9090"]
  - job_name: "master"
    static_configs:
      - targets: ["192.168.121.101:8090"]
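
Before restarting, the edited file can be validated with promtool, which ships in the same Prometheus tarball:

# Validate the configuration before reloading
promtool check config /prom/prometheus/prometheus.yml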

 
# Restart the Prometheus service
[root@prometheus prometheus]# service prometheus restart
Redirecting to /bin/systemctl restart prometheus.service

Install and deploy Grafana

[root@prometheus ~]# ls
anaconda-ks.cfg  grafana-enterprise-9.1.2-1.x86_64.rpm  node_exporter-1.4.0-rc.0.linux-amd64.tar.gz

[root@prometheus ~]# yum install grafana-enterprise-9.1.2-1.x86_64.rpm  -y
# Start it
[root@prometheus ~]# service grafana-server start
Starting grafana-server (via systemctl):                   [  OK  ]
 
[root@prometheus ~]# ps aux|grep grafana
grafana    8230  6.6  3.8 1195912 71472 ?       Ssl  16:48   0:00 /usr/sbin/grafana-server --config=/etc/grafana/grafana.ini --pidfile=/var/run/grafana/grafana-server.pid --packaging=rpm cfg:default.paths.logs=/var/log/grafana cfg:default.paths.data=/var/lib/grafana cfg:default.paths.plugins=/var/lib/grafana/plugins cfg:default.paths.provisioning=/etc/grafana/provisioning
root       8238  0.0  0.0 112824   976 pts/0    R+   16:49   0:00 grep --color=auto grafana

[root@prometheus ~]# netstat -anplut|grep grafana
tcp        0      0 192.168.121.108:44674   34.120.177.193:443      ESTABLISHED 8230/grafana-server 
tcp6       0      0 :::3000                 :::*                    LISTEN      8230/grafana-server

# Browse to ip:3000
Default username: admin
Default password: admin

Configuration --> Add data source -> choose Prometheus
Enter http://192.168.121.108:9090
dashboard -> import -> add template 1860 -> choose the Prometheus data source

Project takeaways

1. Gained a better understanding of how development and operations relate
2. Became more familiar with Ansible, Prometheus, and similar services
3. Improved my troubleshooting skills
4. Gained a new appreciation of load balancing, high availability, and autoscaling
