使用kubeadm或者其他方式部署一套k8s集群。
在k8s集群創(chuàng)建一個namespace:halashow
3 ELK部署架構(gòu)
3.1 準備資源配置清單
Deployment中存在一個es的業(yè)務(wù)容器,和一個init容器,init容器主要是配置vm.max_map_count=262144。
service暴露了9200端口,其他服務(wù)可通過service name加端口訪問es。
3.1 準備資源配置清單
Deployment中存在一個es的業(yè)務(wù)容器,和一個init容器,init容器主要是配置vm.max_map_count=262144。
service暴露了9200端口,其他服務(wù)可通過service name加端口訪問es。
# ClusterIP service for Elasticsearch in the `halashow` namespace.
# Other in-cluster services reach ES at http://elasticsearch:9200.
apiVersion: v1
kind: Service
metadata:
  namespace: halashow
  name: elasticsearch
  labels:
    app: elasticsearch-logging
spec:
  type: ClusterIP
  ports:
    - port: 9200
      name: elasticsearch
  selector:
    app: elasticsearch-logging
---
# Single-node Elasticsearch Deployment for the `halashow` namespace.
# An init container raises vm.max_map_count on the node (ES hard requirement).
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: elasticsearch-logging
    version: v1
  name: elasticsearch
  namespace: halashow
spec:
  # NOTE(review): the original carried `serviceName: elasticsearch-logging`,
  # which is a StatefulSet-only field and is not valid in a Deployment spec —
  # removed. Server-populated fields (`generation`, `creationTimestamp`) were
  # also dropped from the manifest.
  minReadySeconds: 10
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: elasticsearch-logging
      version: v1
  strategy:
    # Recreate (not RollingUpdate): two ES pods must never share the hostPath data dir.
    type: Recreate
  template:
    metadata:
      labels:
        app: elasticsearch-logging
        version: v1
    spec:
      affinity:
        nodeAffinity: {}
      containers:
        - env:
            - name: discovery.type
              value: single-node
            - name: ES_JAVA_OPTS
              value: "-Xms512m -Xmx512m"
            - name: MINIMUM_MASTER_NODES
              value: "1"
          image: docker.elastic.co/elasticsearch/elasticsearch:7.12.0-amd64
          imagePullPolicy: IfNotPresent
          name: elasticsearch-logging
          ports:
            - containerPort: 9200
              name: db
              protocol: TCP
            - containerPort: 9300
              name: transport
              protocol: TCP
          resources:
            limits:
              cpu: "1"
              memory: 1Gi
            requests:
              cpu: "1"
              memory: 1Gi
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          volumeMounts:
            - mountPath: /data
              name: es-persistent-storage
      dnsPolicy: ClusterFirst
      imagePullSecrets:
        - name: user-1-registrysecret
      initContainers:
        # Privileged sysctl: Elasticsearch requires vm.max_map_count >= 262144.
        - command:
            - /sbin/sysctl
            - -w
            - vm.max_map_count=262144
          image: alpine:3.6
          imagePullPolicy: IfNotPresent
          name: elasticsearch-logging-init
          resources: {}
          securityContext:
            privileged: true
            procMount: Default
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
      volumes:
        - hostPath:
            path: /data/elk/elasticsearch-logging
            type: DirectoryOrCreate
          name: es-persistent-storage
      nodeSelector:
        alibabacloud.com/is-edge-worker: "false"
        beta.kubernetes.io/arch: amd64
        beta.kubernetes.io/os: linux
      tolerations:
        - effect: NoSchedule
          key: node-role.alibabacloud.com/addon
          operator: Exists
elasticsearch持久化部署,參考資料
https://www.51cto.com/article/673023.html
# Service fronting the `es` StatefulSet in the default namespace.
apiVersion: v1
kind: Service
metadata:
  name: es
  namespace: default
  labels:
    k8s-app: es
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "Elasticsearch"
spec:
  # NOTE(review): the StatefulSet below references this service via `serviceName: es`;
  # stable per-pod DNS normally requires a headless service (`clusterIP: None`) —
  # confirm whether that was intended here.
  ports:
    - port: 9200
      protocol: TCP
      targetPort: db
  selector:
    k8s-app: es
---
# RBAC authn and authz
# ServiceAccount used by the Elasticsearch pods (bound to the `es` ClusterRole below).
apiVersion: v1
kind: ServiceAccount
metadata:
  name: es
  namespace: default
  labels:
    k8s-app: es
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
# Read-only access to services/namespaces/endpoints (used for ES discovery).
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: es
  labels:
    k8s-app: es
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
  - apiGroups:
      - ""
    resources:
      - "services"
      - "namespaces"
      - "endpoints"
    verbs:
      - "get"
---
# Binds the `es` ServiceAccount (default namespace) to the `es` ClusterRole.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  # NOTE(review): ClusterRoleBinding is cluster-scoped; the original
  # `namespace: default` in metadata was meaningless and has been dropped.
  name: es
  labels:
    k8s-app: es
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
subjects:
  - kind: ServiceAccount
    name: es
    namespace: default
    apiGroup: ""
roleRef:
  kind: ClusterRole
  name: es
  # roleRef.apiGroup must name the RBAC API group; the original empty string is invalid.
  apiGroup: rbac.authorization.k8s.io
---
# Elasticsearch deployment itself
# Single-node Elasticsearch StatefulSet.
# StatefulSet gives ordered, stably-named pods (es-0); data lives on a node hostPath.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: es
  namespace: default
  labels:
    k8s-app: es
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    srv: srv-elasticsearch
spec:
  serviceName: es  # ties pod DNS to the `es` service (es-0.es.default.svc.cluster.local)
  replicas: 1      # single node
  selector:
    matchLabels:
      k8s-app: es  # must match the pod template labels
  template:
    metadata:
      labels:
        k8s-app: es
        kubernetes.io/cluster-service: "true"
    spec:
      serviceAccountName: es
      containers:
        - image: docker.io/library/elasticsearch:7.10.1
          name: es
          resources:
            # need more cpu upon initialization, therefore burstable class
            limits:
              cpu: 1000m
              memory: 2Gi
            requests:
              cpu: 100m
              memory: 500Mi
          ports:
            - containerPort: 9200
              name: db
              protocol: TCP
            - containerPort: 9300
              name: transport
              protocol: TCP
          volumeMounts:
            - name: es
              mountPath: /usr/share/elasticsearch/data/  # data directory
          env:
            - name: "NAMESPACE"
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: "discovery.type"  # single-node discovery mode
              value: "single-node"
            - name: ES_JAVA_OPTS
              # JVM heap must fit inside the 2Gi container limit. The original
              # "-Xms1024m -Xmx4g" allowed the heap to grow past the cgroup
              # limit (guaranteed OOMKill under load) and used unequal Xms/Xmx.
              value: "-Xms1024m -Xmx1024m"
      volumes:
        - name: es
          hostPath:
            path: /data/es/
      nodeSelector:  # pin to the node labelled es=data that holds the hostPath volume
        es: data
      tolerations:
        - effect: NoSchedule
          operator: Exists
      # Elasticsearch requires vm.max_map_count to be at least 262144.
      # If your OS already sets up this number to a higher value, feel free
      # to remove this init container.
      initContainers:
        - name: es-init
          image: alpine:3.6
          command: ["/sbin/sysctl", "-w", "vm.max_map_count=262144"]
          securityContext:  # applies only to this container, not to volumes
            privileged: true
        - name: increase-fd-ulimit
          image: busybox
          imagePullPolicy: IfNotPresent
          # NOTE(review): `ulimit -n` here only affects this init container's own
          # shell process, not the elasticsearch container — raise nofile via the
          # container runtime / node config instead. Kept for parity with original.
          command: ["sh", "-c", "ulimit -n 65536"]
          securityContext:
            privileged: true
        - name: elasticsearch-volume-init  # make the hostPath data dir writable (chmod 777)
          image: alpine:3.6
          command:
            - chmod
            - -R
            - "777"
            - /usr/share/elasticsearch/data/
          volumeMounts:
            - name: es
              mountPath: /usr/share/elasticsearch/data/
---
# NodePort service exposing Kibana outside the cluster on node port 30561.
apiVersion: v1
kind: Service
metadata:
  name: kibana
  namespace: default
  labels:
    k8s-app: kibana
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "Kibana"
    srv: srv-kibana
spec:
  type: NodePort  # reachable at <node-ip>:30561 (original comment said 25601 — mismatch)
  ports:
    - port: 5601
      nodePort: 30561
      protocol: TCP
      targetPort: ui
  selector:
    k8s-app: kibana
---
# Kibana Deployment (default namespace), pointed at the `es` service.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana
  namespace: default
  labels:
    k8s-app: kibana
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    srv: srv-kibana
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: kibana
  template:
    metadata:
      labels:
        k8s-app: kibana
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      containers:
        - name: kibana
          image: docker.io/kubeimages/kibana:7.9.3  # multi-arch image (amd64 + arm64)
          resources:
            # need more cpu upon initialization, therefore burstable class
            limits:
              cpu: 1000m
            requests:
              cpu: 100m
          env:
            - name: ELASTICSEARCH_HOSTS
              value: http://es:9200
          ports:
            - containerPort: 5601
              name: ui
              protocol: TCP
---
# Ingress for Kibana. Migrated from extensions/v1beta1 (removed in k8s 1.22)
# to networking.k8s.io/v1, which requires pathType and the nested backend form.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: kibana
  # NOTE(review): this namespace differs from the kibana Service's namespace
  # (default/halashow above); an Ingress can only route to services in its own
  # namespace — confirm and align.
  namespace: yk-mysql-test
spec:
  rules:
    - host: kibana.ctnrs.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: kibana
                port:
                  number: 5601
4 部署logstash
創(chuàng)建configMap定義logstash相關(guān)配置項,主要包括以下幾項。
input:定義輸入到logstash的源。
filter:定義過濾條件。
output:可以定義輸出到es,redis,kafka等等。
---
# Logstash pipeline: reads events from a Redis list, dissects the message,
# and routes to per-application Elasticsearch indices by tag.
apiVersion: v1
kind: ConfigMap
metadata:
  name: logstash-config
  namespace: halashow
data:
  # NOTE(review): the Redis password is stored in plaintext here — consider
  # moving it to a Secret and referencing it via an environment variable.
  logstash.conf: |-
    input {
      redis {
        host => "10.36.21.220"
        port => 30079
        db => 0
        key => "localhost"
        password => "123456"
        data_type => "list"
        threads => 4
        batch_count => "1"
        #tags => "user.log"
      }
    }
    filter {
      dissect {
        mapping => { "message" => "[%{Time}] %{LogLevel} %{message}" }
      }
    }
    output {
      if "nginx.log" in [tags] {
        elasticsearch {
          hosts => ["elasticsearch:9200"]
          index => "nginx.log"
        }
      }
      if "osale-uc-test" in [tags] {
        elasticsearch {
          hosts => ["elasticsearch:9200"]
          index => "osale-uc-test"
        }
      }
      if "osale-jindi-client-test" in [tags] {
        elasticsearch {
          hosts => ["elasticsearch:9200"]
          index => "osale-jindi-client-test"
        }
      }
      if "osale-admin-weixin" in [tags] {
        elasticsearch {
          hosts => ["elasticsearch:9200"]
          index => "osale-admin-weixin"
        }
      }
    }
---
# Logstash Deployment; the pipeline file is mounted from the ConfigMap above.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: logstash
  namespace: halashow
  labels:
    name: logstash
spec:
  replicas: 1
  selector:
    matchLabels:
      name: logstash
  template:
    metadata:
      labels:
        app: logstash
        name: logstash
    spec:
      containers:
        - name: logstash
          image: docker.elastic.co/logstash/logstash:7.12.0
          ports:
            - containerPort: 5044  # beats input
              protocol: TCP
            - containerPort: 9600  # logstash monitoring API
              protocol: TCP
          volumeMounts:
            # subPath mounts just logstash.conf into the default pipeline directory.
            - name: logstash-config
              mountPath: /usr/share/logstash/pipeline/logstash.conf
              subPath: logstash.conf
      volumes:
        - name: logstash-config
          configMap:
            name: logstash-config
---
# ClusterIP service so filebeat can reach logstash at logstash:5044.
apiVersion: v1
kind: Service
metadata:
  namespace: halashow
  name: logstash
  labels:
    app: logstash
spec:
  type: ClusterIP
  ports:
    - port: 5044
      name: logstash
  selector:
    app: logstash
5.部署redis5.0?
# Redis configuration for the ELK buffer instance.
apiVersion: v1
kind: ConfigMap
metadata:
  name: elk-redis
  labels:
    app: elk-redis
data:
  redis.conf: |-
    bind 0.0.0.0
    daemonize no
    pidfile "/var/run/redis.pid"
    # Listen on 6379: the StatefulSet's containerPort and its liveness/readiness
    # probes all target 6379 — the original value 6380 made every probe fail.
    port 6379
    timeout 300
    loglevel warning
    logfile "redis.log"
    databases 16
    rdbcompression yes
    dbfilename "redis.rdb"
    dir "/data"
    requirepass "123456"
    masterauth "123456"
    maxclients 10000
    maxmemory 1000mb
    maxmemory-policy allkeys-lru
    appendonly yes
    appendfsync always
---
# Single-replica Redis StatefulSet used as the log buffer between filebeat and logstash.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: elk-redis
  labels:
    app: elk-redis
spec:
  replicas: 1
  selector:
    matchLabels:
      app: elk-redis
  template:
    metadata:
      labels:
        app: elk-redis
    spec:
      containers:
        - name: redis
          image: redis:5.0.7
          # Start redis with the config mounted from the elk-redis ConfigMap.
          command:
            - "sh"
            - "-c"
            - "redis-server /usr/local/redis/redis.conf"
          ports:
            - containerPort: 6379
          resources:
            limits:
              cpu: 1000m
              memory: 1024Mi
            requests:
              cpu: 1000m
              memory: 1024Mi
          livenessProbe:
            tcpSocket:
              port: 6379
            initialDelaySeconds: 300
            timeoutSeconds: 1
            periodSeconds: 10
            successThreshold: 1
            failureThreshold: 3
          readinessProbe:
            tcpSocket:
              port: 6379
            initialDelaySeconds: 5
            timeoutSeconds: 1
            periodSeconds: 10
            successThreshold: 1
            failureThreshold: 3
          volumeMounts:
            - name: data
              mountPath: /data
            # host timezone (Asia/Shanghai)
            - name: timezone
              mountPath: /etc/localtime
            - name: config
              mountPath: /usr/local/redis/redis.conf
              subPath: redis.conf
      volumes:
        - name: config
          configMap:
            name: elk-redis
        - name: timezone
          hostPath:
            path: /usr/share/zoneinfo/Asia/Shanghai
        - name: data
          hostPath:
            type: DirectoryOrCreate
            path: /data/elk/elk-redis
      # NOTE(review): nodeName hard-pins the pod to one node and bypasses the
      # scheduler; prefer a nodeSelector/affinity if portability matters.
      nodeName: gem-yxyw-t-c02
---
為了提升redis的性能需要關(guān)閉持久化。
說明:redis默認是開啟持久化的,默認持久化方式為RDB。
關(guān)閉RDB持久化的步驟:
1、注釋掉原來的持久化規(guī)則
# save 3600 1 300 100 60 10000
2、把 save 節(jié)點設(shè)置為空
save ""
3、刪除 dump.rdb 轉(zhuǎn)儲文件
rm -f dump.rdb
關(guān)閉AOF持久化:設(shè)置 appendonly 的值為 no 即可
6. 部署filebeat,部署k8s上沒有成功,改成源碼部署到主機成功了
wget https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-7.8.0-linux-x86_64.tar.gz
tar -zxvf filebeat-7.8.0-linux-x86_64.tar.gz
vi /data/elk/filebeat/filebeat-7.8.0-linux-x86_64/filebeat.yml
# filebeat.yml for the host-installed filebeat: tails application log files and
# ships events into the Redis buffer that logstash consumes.
filebeat.inputs:
  # Use the top-level `tags` option: the logstash pipeline routes on `[tags]`.
  # The original `fields: tags: [...]` places values under [fields][tags]
  # (unless fields_under_root is enabled), so the logstash output conditionals
  # would never match.
  - type: log
    enabled: true
    paths:
      - /data/test-logs/osale-uc-test/*.log
    tags: ["osale-uc-test"]
  - type: log
    enabled: true
    paths:
      - /data/test-logs/osale-jindi-client-test/*.log
    tags: ["osale-jindi-client-test"]
  - type: log
    enabled: true
    paths:
      - /data/test-logs/osale-admin-weixin-test/*/osale-admin-weixin/*.log
    tags: ["osale-admin-weixin"]
  - type: log
    enabled: true
    paths:
      - /data/tengine/logs/*.log
    tags: ["nginx.log"]
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
setup.template.settings:
  index.number_of_shards: 1
setup.kibana:
output.redis:
  enabled: true
  hosts: ["10.36.21.220:30079"]
  password: "123456"
  db: 0
  key: localhost
  worker: 4
  timeout: 5
  max_retries: 3
  datatype: list
processors:
  - add_host_metadata: ~
  - add_cloud_metadata: ~
  - add_docker_metadata: ~
  - add_kubernetes_metadata: ~
---
# Minimal filebeat config for the in-cluster filebeat: tail /logm/*.log and
# forward to the logstash service.
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-config-to-logstash
  namespace: halashow
data:
  filebeat.yml: |-
    filebeat.inputs:
      - type: log
        paths:
          - /logm/*.log
    output.logstash:
      hosts: ['logstash:5044']
---
# In-cluster filebeat Deployment shipping /logm/*.log to logstash.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: filebeat
  namespace: halashow
  labels:
    name: filebeat
spec:
  replicas: 1
  selector:
    matchLabels:
      name: filebeat
  template:
    metadata:
      labels:
        app: filebeat
        name: filebeat
    spec:
      containers:
        - name: filebeat
          image: docker.elastic.co/beats/filebeat:7.12.0
          args: ["-c", "/etc/filebeat.yml", "-e"]
          volumeMounts:
            # NOTE(review): /logm is an emptyDir private to this pod — nothing
            # else writes logs into it, so this pod ships no data unless a
            # sidecar (or a shared volume) supplies the files. Confirm intent.
            - mountPath: /logm
              name: logm
            - name: config
              mountPath: /etc/filebeat.yml
              readOnly: true
              subPath: filebeat.yml
      volumes:
        - name: logm
          emptyDir: {}
        - name: config
          configMap:
            defaultMode: 0640
            name: filebeat-config-to-logstash
cd /data/elk/filebeat-7.8.0-linux-x86_64

sudo ./filebeat -e -c filebeat.yml -d "publish"    # 前臺啟動filebeat

nohup ./filebeat -e -c filebeat.yml >/dev/null 2>&1 &    # 后臺啟動
7 部署kibana
7.1 準備資源配置清單
# Kibana Deployment for the `halashow` namespace; kibana.yml is mounted from
# the `kibana-cm` ConfigMap.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana
  namespace: halashow
  labels:
    name: kibana
spec:
  # NOTE(review): removed `serviceName: halashow` — that field only exists on
  # StatefulSet and is not part of the Deployment spec.
  replicas: 1
  selector:
    matchLabels:
      name: kibana
  template:
    metadata:
      labels:
        app: kibana
        name: kibana
    spec:
      restartPolicy: Always
      containers:
        - name: kibana
          image: kibana:7.12.0
          imagePullPolicy: Always
          ports:
            - containerPort: 5601
          resources:
            requests:
              memory: 1024Mi
              cpu: 50m
            limits:
              memory: 1024Mi
              cpu: 1000m
          volumeMounts:
            - name: kibana-config
              mountPath: /usr/share/kibana/config/kibana.yml
              subPath: kibana.yml
      volumes:
        - name: kibana-config
          configMap:
            name: kibana-cm
            items:
              - key: "kibana.yml"
                path: "kibana.yml"
---
# NodePort service exposing Kibana (halashow) on node port 30102.
apiVersion: v1
kind: Service
metadata:
  labels:
    app: kibana
  name: kibana
  namespace: halashow
spec:
  type: NodePort
  ports:
    - name: kibana
      port: 5601
      nodePort: 30102
      protocol: TCP
      targetPort: 5601
  selector:
    app: kibana
# kibana.yml — contents for the `kibana-cm` ConfigMap mounted by the kibana Deployment.
server.name: kibana
server.host: "0"
elasticsearch.hosts: [ "http://elasticsearch:9200" ]
monitoring.ui.container.elasticsearch.enabled: true
i18n.locale: "zh-CN" # localise the Kibana UI to Chinese
location / {    # 必須是 / ,否則頁面資源無法加載
    proxy_pass http://<ip>:<port>;
}
1.Kibana設(shè)置elasticsearch索引過期時間,到期自動刪除
首先創(chuàng)建一個索引,索引必須名字后面*這樣所有日期都能檢索出。然后在創(chuàng)建一個十天的日志生命周期管理,在創(chuàng)建一個索引模板,索引模板可以檢索出所有需要檢索的日志,這個索引模板可以直接復(fù)制日志生命周期管理代碼,也可之后日志生命周期里面加入這個索引模板。
?
2.創(chuàng)建一個索引生命周期策略?
Index Management? ??索引管理
Index Lifecycle Policies? ?索引生命周期策略
Delete phase? 刪除階段
?
?
?
3.創(chuàng)建一個索引模板用來管理所有索引
Index Templates? ??索引模板
?
?
?
{
  "index": {
    "lifecycle": {
      "name": "gdnb-test-10day"
    }
  }
}
?可將索引周期管理的代碼復(fù)制過去,也可直接到索引周期管理里面選擇gdnb-test-10day這個索引模板
?
4.將需要保存十天的日志索引模板加入剛創(chuàng)建的生命周期管理
?文章來源:http://www.zghlxwxcb.cn/news/detail-441664.html
?文章來源地址http://www.zghlxwxcb.cn/news/detail-441664.html
到了這里,關(guān)于在k8s集群部署ELK的文章就介紹完了。如果您還想了解更多內(nèi)容,請在右上角搜索TOY模板網(wǎng)以前的文章或繼續(xù)瀏覽下面的相關(guān)文章,希望大家以后多多支持TOY模板網(wǎng)!