Building microservice log collection on k8s: log-pilot + kafka + logstash + es + kibana

This article uses log-pilot, an Alibaba Cloud open-source collector built on top of filebeat; it has major advantages for collecting microservice logs. For a more detailed write-up on log-pilot, see 《利用开源组件Log-pilot搭建Kubernetes日志解决方案》 (Building a Kubernetes Logging Solution with the Open-Source Component Log-pilot).
Here is a simple diagram of the pipeline:
(diagram: log collection flow, app containers -> log-pilot -> kafka -> logstash -> es -> kibana)

Component versions:
log-pilot: 0.9.7
kafka: 2.2.0
zookeeper: 3.4.10
logstash: 6.8.0
es: 6.8.0
kibana: 6.8.0

For safety, it is recommended to pull the Docker image for each component from the official Docker Hub registry at https://hub.docker.com and then push it to your own registry.
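For example, to mirror the Elasticsearch image into the private registry used throughout this article (the upstream image name is an assumption; use whichever source image fits your environment):

docker pull elasticsearch:6.8.0
docker tag elasticsearch:6.8.0 harbor.suzhuchang.com/devops/elasticsearch:6.8.0
docker push harbor.suzhuchang.com/devops/elasticsearch:6.8.0
# repeat for zookeeper, kafka, logstash, kibana and log-pilot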

1. Deploy Kafka + ZooKeeper
# Storage sizes and related settings can be adjusted to suit your environment;
# if you have no specific requirements, the defaults below are fine.
cat kafka.yaml
---
apiVersion: v1
kind: Service
metadata:
  name: kafka-svc
  namespace: kafka
  labels:
    app: kafka
spec:
  ports:
  - port: 9092
    name: server
  clusterIP: None
  selector:
    app: kafka
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: kafka-pdb
  namespace: kafka
spec:
  selector:
    matchLabels:
      app: kafka
  minAvailable: 2
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: kafka
  namespace: kafka
spec:
  selector:
     matchLabels:
        app: kafka
  serviceName: kafka-svc
  replicas: 3
  template:
    metadata:
      labels:
        app: kafka
    spec:
      tolerations:
      - key: "travis.io/schedule-only"
        operator: "Equal"
        value: "kafka"
        effect: "NoSchedule"
      - key: "travis.io/schedule-only"
        operator: "Equal"
        value: "kafka"
        effect: "NoExecute"
        tolerationSeconds: 3600
      - key: "travis.io/schedule-only"
        operator: "Equal"
        value: "kafka"
        effect: "PreferNoSchedule"
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: "app"
                    operator: In
                    values:
                    - kafka
              topologyKey: "kubernetes.io/hostname"
        podAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
             - weight: 1
               podAffinityTerm:
                 labelSelector:
                    matchExpressions:
                      - key: "app"
                        operator: In
                        values:
                        - zk
                 topologyKey: "kubernetes.io/hostname"
      terminationGracePeriodSeconds: 300
      containers:
      - name: k8s-kafka
        imagePullPolicy: Always
        image: harbor.suzhuchang.com/devops/kafka:2.2.0
        resources:
          requests:
            memory: "600Mi"
            cpu: 500m
        ports:
        - containerPort: 9092
          name: server
        command:
        - sh
        - -c
        - "exec kafka-server-start.sh /opt/kafka/config/server.properties --override broker.id=${HOSTNAME##*-} \
          --override listeners=PLAINTEXT://:9092 \
          --override zookeeper.connect=zk-0.zk-hs.kafka.svc.cluster.local:2181,zk-1.zk-hs.kafka.svc.cluster.local:2181,zk-2.zk-hs.kafka.svc.cluster.local:2181 \
          --override log.dir=/var/lib/kafka \
          --override auto.create.topics.enable=true \
          --override auto.leader.rebalance.enable=true \
          --override background.threads=10 \
          --override compression.type=producer \
          --override delete.topic.enable=false \
          --override leader.imbalance.check.interval.seconds=300 \
          --override leader.imbalance.per.broker.percentage=10 \
          --override log.flush.interval.messages=9223372036854775807 \
          --override log.flush.offset.checkpoint.interval.ms=60000 \
          --override log.flush.scheduler.interval.ms=9223372036854775807 \
          --override log.retention.bytes=-1 \
          --override log.retention.hours=1 \
          --override log.roll.hours=168 \
          --override log.roll.jitter.hours=0 \
          --override log.segment.bytes=1073741824 \
          --override log.segment.delete.delay.ms=60000 \
          --override message.max.bytes=1000012 \
          --override min.insync.replicas=1 \
          --override num.io.threads=8 \
          --override num.network.threads=3 \
          --override num.recovery.threads.per.data.dir=1 \
          --override num.replica.fetchers=1 \
          --override offset.metadata.max.bytes=4096 \
          --override offsets.commit.required.acks=-1 \
          --override offsets.commit.timeout.ms=5000 \
          --override offsets.load.buffer.size=5242880 \
          --override offsets.retention.check.interval.ms=600000 \
          --override offsets.retention.minutes=1440 \
          --override offsets.topic.compression.codec=0 \
          --override offsets.topic.num.partitions=50 \
          --override offsets.topic.replication.factor=3 \
          --override offsets.topic.segment.bytes=104857600 \
          --override queued.max.requests=500 \
          --override quota.consumer.default=9223372036854775807 \
          --override quota.producer.default=9223372036854775807 \
          --override replica.fetch.min.bytes=1 \
          --override replica.fetch.wait.max.ms=500 \
          --override replica.high.watermark.checkpoint.interval.ms=5000 \
          --override replica.lag.time.max.ms=10000 \
          --override replica.socket.receive.buffer.bytes=65536 \
          --override replica.socket.timeout.ms=30000 \
          --override request.timeout.ms=30000 \
          --override socket.receive.buffer.bytes=102400 \
          --override socket.request.max.bytes=104857600 \
          --override socket.send.buffer.bytes=102400 \
          --override unclean.leader.election.enable=true \
          --override zookeeper.session.timeout.ms=6000 \
          --override zookeeper.set.acl=false \
          --override broker.id.generation.enable=true \
          --override connections.max.idle.ms=600000 \
          --override controlled.shutdown.enable=true \
          --override controlled.shutdown.max.retries=3 \
          --override controlled.shutdown.retry.backoff.ms=5000 \
          --override controller.socket.timeout.ms=30000 \
          --override default.replication.factor=1 \
          --override fetch.purgatory.purge.interval.requests=1000 \
          --override group.max.session.timeout.ms=300000 \
          --override group.min.session.timeout.ms=6000 \
          --override inter.broker.protocol.version=2.2.0 \
          --override log.cleaner.backoff.ms=15000 \
          --override log.cleaner.dedupe.buffer.size=134217728 \
          --override log.cleaner.delete.retention.ms=3200000 \
          --override log.cleaner.enable=true \
          --override log.cleaner.io.buffer.load.factor=0.9 \
          --override log.cleaner.io.buffer.size=524288 \
          --override log.cleaner.io.max.bytes.per.second=1.7976931348623157E308 \
          --override log.cleaner.min.cleanable.ratio=0.5 \
          --override log.cleaner.min.compaction.lag.ms=0 \
          --override log.cleaner.threads=1 \
          --override log.cleanup.policy=delete \
          --override log.index.interval.bytes=4096 \
          --override log.index.size.max.bytes=10485760 \
          --override log.message.timestamp.difference.max.ms=9223372036854775807 \
          --override log.message.timestamp.type=CreateTime \
          --override log.preallocate=false \
          --override log.retention.check.interval.ms=300000 \
          --override max.connections.per.ip=2147483647 \
          --override num.partitions=4 \
          --override producer.purgatory.purge.interval.requests=1000 \
          --override replica.fetch.backoff.ms=1000 \
          --override replica.fetch.max.bytes=1048576 \
          --override replica.fetch.response.max.bytes=10485760 \
          --override reserved.broker.max.id=1000 "
        env:
        - name: KAFKA_HEAP_OPTS
          value : "-Xmx512M -Xms512M"
        - name: KAFKA_OPTS
          value: "-Dlogging.level=INFO"
        volumeMounts:
        - name: datadir
          mountPath: /var/lib/kafka
        readinessProbe:
          tcpSocket:
            port: 9092
          timeoutSeconds: 1
          initialDelaySeconds: 5
      securityContext:
        runAsUser: 1000
        fsGroup: 1000
  volumeClaimTemplates:
  - metadata:
      name: datadir
    spec:
      accessModes: [ "ReadWriteOnce" ]
      storageClassName: rook-ceph-block
      resources:
        requests:
          storage:  50Gi

# ZooKeeper manifest
cat zk.yaml
apiVersion: v1
kind: Service
metadata:
  name: zk-hs
  namespace: kafka
  labels:
    app: zk
spec:
  ports:
  - port: 2888
    name: server
  - port: 3888
    name: leader-election
  clusterIP: None
  selector:
    app: zk
---
apiVersion: v1
kind: Service
metadata:
  name: zk-cs
  namespace: kafka
  labels:
    app: zk
spec:
  ports:
  - port: 2181
    name: client
  selector:
    app: zk
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: zk-pdb
  namespace: kafka
spec:
  selector:
    matchLabels:
      app: zk
  maxUnavailable: 1
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zk
  namespace: kafka
spec:
  selector:
    matchLabels:
      app: zk
  serviceName: zk-hs
  replicas: 3
  updateStrategy:
    type: RollingUpdate
  podManagementPolicy: Parallel
  template:
    metadata:
      labels:
        app: zk
    spec:
      tolerations:
      - key: "travis.io/schedule-only"
        operator: "Equal"
        value: "kafka"
        effect: "NoSchedule"
      - key: "travis.io/schedule-only"
        operator: "Equal"
        value: "kafka"
        effect: "NoExecute"
        tolerationSeconds: 3600
      - key: "travis.io/schedule-only"
        operator: "Equal"
        value: "kafka"
        effect: "PreferNoSchedule"
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: "app"
                    operator: In
                    values:
                    - zk
              topologyKey: "kubernetes.io/hostname"
      containers:
      - name: kubernetes-zookeeper
        imagePullPolicy: Always
        image: harbor.suzhuchang.com/devops/zookeeper:3.4.10
        resources:
          requests:
            memory: "200Mi"
            cpu: "0.5"
        ports:
        - containerPort: 2181
          name: client
        - containerPort: 2888
          name: server
        - containerPort: 3888
          name: leader-election
        command:
        - sh
        - -c
        - "start-zookeeper \
          --servers=3 \
          --data_dir=/var/lib/zookeeper/data \
          --data_log_dir=/var/lib/zookeeper/data/log \
          --conf_dir=/opt/zookeeper/conf \
          --client_port=2181 \
          --election_port=3888 \
          --server_port=2888 \
          --tick_time=2000 \
          --init_limit=10 \
          --sync_limit=5 \
          --heap=512M \
          --max_client_cnxns=60 \
          --snap_retain_count=3 \
          --purge_interval=12 \
          --max_session_timeout=40000 \
          --min_session_timeout=4000 \
          --log_level=INFO"
        readinessProbe:
          exec:
            command:
            - sh
            - -c
            - "zookeeper-ready 2181"
          initialDelaySeconds: 10
          timeoutSeconds: 5
        livenessProbe:
          exec:
            command:
            - sh
            - -c
            - "zookeeper-ready 2181"
          initialDelaySeconds: 10
          timeoutSeconds: 5
        volumeMounts:
        - name: datadir
          mountPath: /var/lib/zookeeper
      securityContext:
        runAsUser: 1000
        fsGroup: 1000
  volumeClaimTemplates:
  - metadata:
      name: datadir
    spec:
      accessModes: [ "ReadWriteOnce" ]
      storageClassName: rook-ceph-block
      resources:
        requests:
          storage: 15Gi
kubectl apply -f zk.yaml
kubectl apply -f kafka.yaml
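Once all Pods are Running, a quick sanity check from inside a broker (zk-cs is the ZooKeeper client Service defined above; the test topic name is illustrative):

kubectl -n kafka get pods
kubectl -n kafka exec kafka-0 -- kafka-topics.sh --zookeeper zk-cs.kafka:2181 --list
kubectl -n kafka exec kafka-0 -- kafka-topics.sh --zookeeper zk-cs.kafka:2181 \
  --create --topic test --partitions 4 --replication-factor 3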

2. Deploy log-pilot
Deploy log-pilot and have it ship logs to Kafka.
For log-pilot, tune the collection rules (for example the multiline regexes) to match your own applications' log formats, then rebuild the image; a sketch of how an application declares its logs follows the DaemonSet manifest below.

cat log-pilot-ds.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: log-pilot
  namespace: kube-logging
  labels:
    app: log-pilot
spec:
  updateStrategy:
    type: RollingUpdate
  selector:
    matchLabels:
      app: log-pilot
  template:
    metadata:
      labels:
        app: log-pilot
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      containers:
      - name: log-pilot
        image: harbor.suzhuchang.com/devops/log-pilot:0.9.71
        resources:
          limits:
            memory: 500Mi
          requests:
            cpu: 200m
            memory: 200Mi
        env:
          - name: "NODE_NAME"
            valueFrom:
              fieldRef:
                fieldPath: spec.nodeName
          - name: "LOGGING_OUTPUT"
            value: "kafka"
          - name: "KAFKA_BROKERS"
            value: "kafka-0.kafka-svc.kafka:9092,kafka-1.kafka-svc.kafka:9092,kafka-2.kafka-svc.kafka:9092"
        volumeMounts:
        - name: sock
          mountPath: /var/run/docker.sock
        - name: root
          mountPath: /host
          readOnly: true
        - name: varlib
          mountPath: /var/lib/filebeat
        - name: varlog
          mountPath: /var/log/filebeat
        - name: localtime
          mountPath: /etc/localtime
          readOnly: true
        livenessProbe:
          failureThreshold: 3
          exec:
            command:
            - /pilot/healthz
          initialDelaySeconds: 10
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 2
        securityContext:
          capabilities:
            add:
            - SYS_ADMIN
      terminationGracePeriodSeconds: 30
      volumes:
      - name: sock
        hostPath:
          path: /var/run/docker.sock
      - name: root
        hostPath:
          path: /
      - name: varlib
        hostPath:
          path: /var/lib/filebeat
          type: DirectoryOrCreate
      - name: varlog
        hostPath:
          path: /var/log/filebeat
          type: DirectoryOrCreate
      - name: localtime
        hostPath:
          path: /etc/localtime

kubectl apply -f log-pilot-ds.yaml
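With the DaemonSet running, an application Pod only needs to declare its logs through aliyun_logs_* environment variables; log-pilot discovers them over the Docker socket and ships them to Kafka, one topic per declared name. A minimal sketch (Pod name, image and log path are illustrative; file logs must sit on a volume, an emptyDir is enough):

apiVersion: v1
kind: Pod
metadata:
  name: tomcat-demo
spec:
  containers:
  - name: tomcat
    image: tomcat:8
    env:
    # collect the container's stdout under the name "tomcat-stdout"
    - name: aliyun_logs_tomcat-stdout
      value: "stdout"
    # collect file logs matching this glob
    - name: aliyun_logs_tomcat-catalina
      value: "/usr/local/tomcat/logs/catalina.*.log"
    volumeMounts:
    - name: tomcat-log
      mountPath: /usr/local/tomcat/logs
  volumes:
  - name: tomcat-log
    emptyDir: {}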
3. Create the ES cluster and Kibana
cat es-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: es-conf
  namespace: kube-logging
data:
  elasticsearch.yml: |
    node.name: ${HOSTNAME}
    cluster.name: hotes-backup
    network.host: "0.0.0.0"
    bootstrap.memory_lock: false
    discovery.zen.ping.unicast.hosts: ${CLUSTER_NODES}
    discovery.zen.minimum_master_nodes: 2   # quorum for 3 master-eligible nodes (3/2 + 1)
cat es-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: es-inner-cluster
  namespace: kube-logging
spec:
  selector:
    app: es-cluster
  clusterIP: None
  ports:
    - name: inner
      port: 9300
---
apiVersion: v1
kind: Service
metadata:
  name: es-rest
  namespace: kube-logging
spec:
  selector:
    app: es-cluster
  type: NodePort
  ports:
    - name: http
      port: 9200
      targetPort: 9200

cat es-statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: esnode
  namespace: kube-logging
  labels:
    app: es-cluster
spec:
  serviceName: es-inner-cluster
  replicas: 3
  updateStrategy:
    type: RollingUpdate
  selector:
    matchLabels:
        app: es-cluster
  template:
    metadata:
      labels:
        app: es-cluster
    spec:
      initContainers:
        - name: fix-permissions
          image: busybox:latest
          imagePullPolicy: IfNotPresent
          securityContext:
            privileged: true
          command: ["sh", "-c", "chown -R 1000:1000 /usr/share/elasticsearch/data"]
          volumeMounts:
            - name: es-data
              mountPath: /usr/share/elasticsearch/data
        - name: init-ulimit
          image: busybox:latest
          imagePullPolicy: IfNotPresent
          securityContext:
            privileged: true
          command: ["sh", "-c", "ulimit -n 655350"]
        - name: init-sysctl
          image: busybox:latest
          imagePullPolicy: IfNotPresent
          securityContext:
            privileged: true
          command: ["sysctl", "-w", "vm.max_map_count=262144"]
      containers:
        - name: elasticsearch
          resources:
            requests:
              memory: 300Mi
              cpu: 100m
            limits:
              memory: 15Gi
              cpu: 3
          securityContext:
            privileged: true
            runAsUser: 0
            capabilities:
              add:
                - IPC_LOCK
                - SYS_RESOURCE
          image: harbor.suzhuchang.com/devops/elasticsearch:6.8.0
          imagePullPolicy: IfNotPresent
          env:
            - name: ES_JAVA_OPTS
              value: "-Xms4g -Xmx4g"
            - name: HOSTNAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: CLUSTER_NODES
              value: "esnode-0.es-inner-cluster,esnode-1.es-inner-cluster,esnode-2.es-inner-cluster"
          ports:
            - containerPort: 9200
              name: es-http
            - containerPort: 9300
              name: es-transport
          volumeMounts:
            - name: es-data
              mountPath: /usr/share/elasticsearch/data
            - name: elasticsearch-config
              mountPath: /usr/share/elasticsearch/config/elasticsearch.yml
              subPath: elasticsearch.yml
      volumes:
        - name: elasticsearch-config
          configMap:
            name: es-conf
            items:
              - key: elasticsearch.yml
                path: elasticsearch.yml
  volumeClaimTemplates:
    - metadata:
        name: es-data
        namespace: kube-logging
      spec:
        storageClassName: rook-ceph-block
        accessModes: [ "ReadWriteOnce" ]
        resources:
          requests:
            storage: 150Gi

cat kibana-conf.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: kibana-conf
  namespace: kube-logging
data:
  kibana.yml: |
     server.name: kibana
     server.host: "0"
     elasticsearch.hosts: [ "http://es-rest:9200" ]
     i18n.locale: "zh-CN"

cat kibana.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana
  namespace: kube-logging
spec:
  replicas: 1
  selector:
    matchLabels:
      app: kibana
  template:
    metadata:
      labels:
        app: kibana
    spec:
      containers:
      - name: kibana
        image: harbor.suzhuchang.com/devops/kibana:6.8.0
        ports:
        - containerPort: 5601
          name: log-kibana
        resources:
           requests:
              memory: 800Mi
              cpu: 300m
           limits:
              memory: 15Gi
              cpu: 3
        env:
        - name: NODE_OPTIONS
          value: "--max_old_space_size=8192"
        volumeMounts:
        - mountPath: /usr/share/kibana/config/kibana.yml
          name: kibana-conf
          subPath: kibana.yml
      volumes:
      - name: kibana-conf
        configMap:
          defaultMode: 420
          name: kibana-conf

# apply ES first; Kibana depends on the es-rest Service
kubectl apply -f es-configmap.yaml
kubectl apply -f es-svc.yaml
kubectl apply -f es-statefulset.yaml
kubectl apply -f kibana-conf.yaml
kubectl apply -f kibana.yaml
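Before pointing Logstash at the cluster, confirm ES came up as a healthy 3-node cluster (esnode-0 follows the StatefulSet naming above):

kubectl -n kube-logging get pods -l app=es-cluster
kubectl -n kube-logging exec esnode-0 -- curl -s http://localhost:9200/_cluster/health?pretty
# expect "number_of_nodes" : 3 and "status" : "green"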
cat logstash-conf.yaml
apiVersion: v1
kind: ConfigMap
metadata:
   name: logstash-conf
   namespace: kube-logging
data:
   logstash.yml: |
      http.host: "0.0.0.0"
      path.config: /usr/share/logstash/configmap
      xpack.monitoring.elasticsearch.url: http://es-rest:9200
      xpack.monitoring.enabled: false

---
apiVersion: v1
kind: ConfigMap
metadata:
   name: logstash-yml
   namespace: kube-logging
data:
   pipeline.yml: |
        input {
            kafka {
                bootstrap_servers => ["kafka-0.kafka-svc.kafka:9092,kafka-1.kafka-svc.kafka:9092,kafka-2.kafka-svc.kafka:9092"]
                group_id => "logstash"
                client_id => "${HOSTNAME}"
                topics_pattern => ".*"
                consumer_threads => 5
                decorate_events => true
                codec => "json"
            }
        }

        filter {
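            # drop the filebeat/log-pilot metadata fields that add no value in ES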
            mutate {
              remove_field => ["@metadata", "prospector","topic","k8s_container_name","k8s_node_name"]
              remove_field => ["docker_container","k8s_pod_namespace","beat","source","k8s_pod","offset"]
              remove_field => ["tags","@version","fields"]
            }
        }

        output {
            elasticsearch {
              hosts => ["es-rest:9200"]
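              # %{index} is set by log-pilot to the name declared after the aliyun_logs_ prefix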
              index => "%{index}-%{+YYYY.MM.dd}"
              action => "index"
          }
        }

cat logstash.yaml
---
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    app: logstash
  name: logstash
  namespace: kube-logging
spec:
  replicas: 2
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: logstash
  template:
    metadata:
      labels:
        app: logstash
    spec:
      containers:
        - name: logstash
          image: harbor.suzhuchang.com/devops/logstatsh:v1.4
          volumeMounts:
            - name: logstash-yml
              mountPath: /usr/share/logstash/configmap/pipeline.yml
              subPath: pipeline.yml
            - name: logstash-conf
              mountPath: /usr/share/logstash/config/logstash.yml
              subPath: logstash.yml
          ports:
            - containerPort: 8080
              protocol: TCP
          resources:
            requests:
              cpu: 100m
              memory: 500M
          securityContext:
            privileged: true
      volumes:
        - name: logstash-conf
          configMap:
            name: logstash-conf
        - name: logstash-yml
          configMap:
            name: logstash-yml

---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: logstash
  name: logstash-service
  namespace: kube-logging
spec:
  ports:
    - port: 8080
      targetPort: 8080
  selector:
    app: logstash
  type: NodePort
kubectl apply -f logstash-conf.yaml
kubectl apply -f logstash.yaml
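To verify the whole pipeline end to end, list the indices through the es-rest NodePort (node IP and assigned port depend on your cluster):

kubectl -n kube-logging get svc es-rest
curl -s "http://<node-ip>:<node-port>/_cat/indices?v"
# one daily index per aliyun_logs_* declaration should appear, e.g. tomcat-stdout-2019.06.01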


4. Log in to Kibana
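Note that the manifests above do not define a Service for Kibana; a minimal NodePort Service sketch to expose the UI on port 5601 (the Service name is illustrative):

apiVersion: v1
kind: Service
metadata:
  name: kibana-svc
  namespace: kube-logging
spec:
  selector:
    app: kibana
  type: NodePort
  ports:
    - name: http
      port: 5601
      targetPort: 5601

Then open http://<node-ip>:<node-port> in a browser and create an index pattern (for example tomcat-stdout-*) to start browsing logs.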