1. Introduction
Kafka and ZooKeeper are two classic examples of stateful clustered services. First, both need persistent storage to hold their state. Second, every Kafka and ZooKeeper instance needs its own instance ID (broker.id for Kafka, myid for ZooKeeper) to identify each member of the cluster; nodes use these identifiers when communicating with one another.
Deploying such services means solving two major problems: state persistence and cluster management (managing multiple service instances). The StatefulSet provided by Kubernetes makes it much easier to deploy and manage stateful clustered services there. In general, three mechanisms are combined to deploy a stateful clustered service:
- Use an Init Container to perform cluster initialization work.
- Use a Headless Service to maintain stable membership relationships within the cluster.
- Use a Persistent Volume and Persistent Volume Claim to provide network storage that persists the data.
Therefore, to deploy stateful services such as Kafka and ZooKeeper in a K8S cluster, you cannot use a Deployment; you must use a StatefulSet. Put simply, "stateful" means the service needs to persist data, such as logs, database data, and service state.
2. Use Cases
- Stable persistent storage: after a Pod is rescheduled, it can still access the same persisted data, implemented with PVCs.
- Stable network identity: after a Pod is rescheduled, its PodName and HostName stay the same, implemented with a Headless Service (a Service without a Cluster IP).
- Ordered deployment and ordered scale-out: Pods are ordered, and deployment or scale-out proceeds in the defined order (from 0 to N-1; all preceding Pods must be Running and Ready before the next Pod starts), implemented with init containers.
- Ordered scale-in and ordered deletion (from N-1 to 0).
3. DNS Name Format
Each Pod in a StatefulSet gets a stable DNS name of the form statefulSetName-{0…N-1}.serviceName.namespace.svc.cluster.local, where:
- statefulSetName is the name of the StatefulSet.
- 0…N-1 is the ordinal of the Pod, running from 0 to N-1.
- serviceName is the name of the Headless Service.
- namespace is the namespace the service lives in; the Headless Service and the StatefulSet must be in the same namespace.
- svc.cluster.local is the cluster root domain of K8S.
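For example, once the ZooKeeper cluster below is deployed (StatefulSet zk, Headless Service zk-hs, namespace boss), the first Pod is reachable at zk-0.zk-hs.boss.svc.cluster.local. A quick way to check resolution is a throwaway Pod inside the cluster; the busybox:1.28 image here is an assumption, not part of the original setup:
# Hypothetical DNS check from a temporary Pod (busybox image assumed)
root@ldap:/opt/k8s-server/kafka/kafka-test/test# kubectl run dns-test -n boss --rm -it --restart=Never \
  --image=busybox:1.28 -- nslookup zk-0.zk-hs.boss.svc.cluster.local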
4. Create the namespace
root@ldap:/opt/k8s-server/kafka/kafka-test/test# kubectl create ns boss
5. Deploy the ZooKeeper cluster
Create the Services
root@ldap:/opt/k8s-server/kafka/kafka-test/test# cat zk-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: zk-hs
  namespace: boss
  labels:
    app: zk
spec:
  ports:
  - port: 2888
    name: server
  - port: 3888
    name: leader-election
  clusterIP: None  # no cluster IP is allocated, making this a headless Service
  selector:
    app: zk
---
apiVersion: v1
kind: Service
metadata:
  name: zk-cs
  namespace: boss
  labels:
    app: zk
spec:
  ports:
  - port: 2181
    name: client
  selector:
    app: zk
Create a PodDisruptionBudget that allows at most one Pod to be unavailable
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: zk-pdb
  namespace: boss
spec:
  selector:
    matchLabels:
      app: zk
  maxUnavailable: 1
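Assuming the two Services are saved as zk-service.yaml (shown above) and the PodDisruptionBudget as zk-pdb.yaml (that file name is an assumption), apply both:
root@ldap:/opt/k8s-server/kafka/kafka-test/test# kubectl apply -f zk-service.yaml
root@ldap:/opt/k8s-server/kafka/kafka-test/test# kubectl apply -f zk-pdb.yaml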
Deploy ZooKeeper
root@ldap:/opt/k8s-server/kafka/kafka-test/test# vim zk.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zk
  namespace: boss
spec:
  selector:
    matchLabels:
      app: zk
  serviceName: zk-hs
  replicas: 3
  updateStrategy:
    type: RollingUpdate
  podManagementPolicy: OrderedReady
  template:
    metadata:
      labels:
        app: zk
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: "app"
                operator: In
                values:
                - zk
            topologyKey: "kubernetes.io/hostname"
      containers:
      - name: kubernetes-zookeeper
        imagePullPolicy: IfNotPresent
        image: "registry.cn-shanghai.aliyuncs.com/study-03/zookeeper:1.0-3.4.10"
        resources:
          requests:
            memory: "1Gi"
            cpu: "0.5"
        ports:
        - containerPort: 2181
          name: client
        - containerPort: 2888
          name: server
        - containerPort: 3888
          name: leader-election
        command:
        - sh
        - -c
        - "start-zookeeper \
          --servers=3 \
          --data_dir=/var/lib/zookeeper/data \
          --data_log_dir=/var/lib/zookeeper/data/log \
          --conf_dir=/opt/zookeeper/conf \
          --client_port=2181 \
          --election_port=3888 \
          --server_port=2888 \
          --tick_time=2000 \
          --init_limit=10 \
          --sync_limit=5 \
          --heap=512M \
          --max_client_cnxns=60 \
          --snap_retain_count=3 \
          --purge_interval=12 \
          --max_session_timeout=40000 \
          --min_session_timeout=4000 \
          --log_level=INFO"
        readinessProbe:
          exec:
            command:
            - sh
            - -c
            - "zookeeper-ready 2181"
          initialDelaySeconds: 10
          timeoutSeconds: 5
        livenessProbe:
          exec:
            command:
            - sh
            - -c
            - "zookeeper-ready 2181"
          initialDelaySeconds: 10
          timeoutSeconds: 5
        volumeMounts:
        - name: zookeeper-data
          mountPath: /var/lib/zookeeper  # must match the data_dir above so the data is actually persisted
  volumeClaimTemplates:
  - metadata:
      name: zookeeper-data
    spec:
      accessModes: [ "ReadWriteOnce" ]
      storageClassName: "juicefs-sc"  # adjust for your cluster
      resources:
        requests:
          storage: 8Gi
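After applying the manifest, the Pods start one at a time (zk-0, then zk-1, then zk-2) because of podManagementPolicy: OrderedReady. A rough verification sequence follows; the myid path matches the data_dir configured above, and the ordinal-to-myid mapping (0 -> 1, 1 -> 2, 2 -> 3) assumes the image's start script assigns IDs that way:
root@ldap:/opt/k8s-server/kafka/kafka-test/test# kubectl apply -f zk.yaml
root@ldap:/opt/k8s-server/kafka/kafka-test/test# kubectl get pods -n boss -l app=zk -w
# Each replica should report a unique myid
root@ldap:/opt/k8s-server/kafka/kafka-test/test# for i in 0 1 2; do kubectl exec zk-$i -n boss -- cat /var/lib/zookeeper/data/myid; done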
6. Deploy the Kafka cluster
Create the Service
root@ldap:/opt/k8s-server/kafka/kafka-test/test# cat kafka-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: kafka-svc
  namespace: boss
  labels:
    app: kafka
spec:
  type: NodePort  # expose the service on every node
  ports:
  - port: 9092
    name: server
    targetPort: 9092  # port inside the Pod
    nodePort: 30092   # pick a fixed port, or let Kubernetes allocate one
  selector:
    app: kafka
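Apply the Service; port 30092 should then be reachable on any node's IP:
root@ldap:/opt/k8s-server/kafka/kafka-test/test# kubectl apply -f kafka-service.yaml
root@ldap:/opt/k8s-server/kafka/kafka-test/test# kubectl get svc kafka-svc -n boss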
Deploy Kafka
root@ldap:/opt/k8s-server/kafka/kafka-test/test# vim kf.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: kafka
  namespace: boss
spec:
  selector:
    matchLabels:
      app: kafka
  serviceName: kafka-svc
  replicas: 3
  template:
    metadata:
      labels:
        app: kafka
    spec:
      containers:
      - name: k8s-kafka
        imagePullPolicy: Always
        image: registry.cn-hangzhou.aliyuncs.com/aliyun_0612/kafka:3.7.1
        resources:
          requests:
            memory: "600Mi"
            cpu: 500m
        ports:
        - containerPort: 9092
          name: server
        command:
        - sh
        - -c
        - "exec /opt/kafka/bin/kafka-server-start.sh /opt/kafka/config/server.properties --override broker.id=${HOSTNAME##*-} \
          --override listeners=PLAINTEXT://:9092 \
          --override zookeeper.connect=zk-cs.boss.svc.cluster.local:2181 \
          --override log.dir=/var/lib/kafka \
          --override auto.create.topics.enable=true \
          --override auto.leader.rebalance.enable=true \
          --override background.threads=10 \
          --override compression.type=producer \
          --override delete.topic.enable=false \
          --override leader.imbalance.check.interval.seconds=300 \
          --override leader.imbalance.per.broker.percentage=10 \
          --override log.flush.interval.messages=9223372036854775807 \
          --override log.flush.offset.checkpoint.interval.ms=60000 \
          --override log.flush.scheduler.interval.ms=9223372036854775807 \
          --override log.retention.bytes=-1 \
          --override log.retention.hours=168 \
          --override log.roll.hours=168 \
          --override log.roll.jitter.hours=0 \
          --override log.segment.bytes=1073741824 \
          --override log.segment.delete.delay.ms=60000 \
          --override message.max.bytes=1000012 \
          --override min.insync.replicas=1 \
          --override num.io.threads=8 \
          --override num.network.threads=3 \
          --override num.recovery.threads.per.data.dir=1 \
          --override num.replica.fetchers=1 \
          --override offset.metadata.max.bytes=4096 \
          --override offsets.commit.required.acks=-1 \
          --override offsets.commit.timeout.ms=5000 \
          --override offsets.load.buffer.size=5242880 \
          --override offsets.retention.check.interval.ms=600000 \
          --override offsets.retention.minutes=1440 \
          --override offsets.topic.compression.codec=0 \
          --override offsets.topic.num.partitions=50 \
          --override offsets.topic.replication.factor=3 \
          --override offsets.topic.segment.bytes=104857600 \
          --override queued.max.requests=500 \
          --override quota.consumer.default=9223372036854775807 \
          --override quota.producer.default=9223372036854775807 \
          --override replica.fetch.min.bytes=1 \
          --override replica.fetch.wait.max.ms=500 \
          --override replica.high.watermark.checkpoint.interval.ms=5000 \
          --override replica.lag.time.max.ms=10000 \
          --override replica.socket.receive.buffer.bytes=65536 \
          --override replica.socket.timeout.ms=30000 \
          --override request.timeout.ms=30000 \
          --override socket.receive.buffer.bytes=102400 \
          --override socket.request.max.bytes=104857600 \
          --override socket.send.buffer.bytes=102400 \
          --override unclean.leader.election.enable=true \
          --override zookeeper.session.timeout.ms=6000 \
          --override zookeeper.set.acl=false \
          --override broker.id.generation.enable=true \
          --override connections.max.idle.ms=600000 \
          --override controlled.shutdown.enable=true \
          --override controlled.shutdown.max.retries=3 \
          --override controlled.shutdown.retry.backoff.ms=5000 \
          --override controller.socket.timeout.ms=30000 \
          --override default.replication.factor=1 \
          --override fetch.purgatory.purge.interval.requests=1000 \
          --override group.max.session.timeout.ms=300000 \
          --override group.min.session.timeout.ms=6000 \
          --override inter.broker.protocol.version=2.2.0 \
          --override log.cleaner.backoff.ms=15000 \
          --override log.cleaner.dedupe.buffer.size=134217728 \
          --override log.cleaner.delete.retention.ms=86400000 \
          --override log.cleaner.enable=true \
          --override log.cleaner.io.buffer.load.factor=0.9 \
          --override log.cleaner.io.buffer.size=524288 \
          --override log.cleaner.io.max.bytes.per.second=1.7976931348623157E308 \
          --override log.cleaner.min.cleanable.ratio=0.5 \
          --override log.cleaner.min.compaction.lag.ms=0 \
          --override log.cleaner.threads=1 \
          --override log.cleanup.policy=delete \
          --override log.index.interval.bytes=4096 \
          --override log.index.size.max.bytes=10485760 \
          --override log.message.timestamp.difference.max.ms=9223372036854775807 \
          --override log.message.timestamp.type=CreateTime \
          --override log.preallocate=false \
          --override log.retention.check.interval.ms=300000 \
          --override max.connections.per.ip=2147483647 \
          --override num.partitions=3 \
          --override producer.purgatory.purge.interval.requests=1000 \
          --override replica.fetch.backoff.ms=1000 \
          --override replica.fetch.max.bytes=1048576 \
          --override replica.fetch.response.max.bytes=10485760 \
          --override reserved.broker.max.id=1000"
        env:
        - name: KAFKA_HEAP_OPTS
          value: "-Xmx512M -Xms512M"
        - name: KAFKA_OPTS
          value: "-Dlogging.level=INFO"
        - name: KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS
          value: "6000"
        - name: KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS
          value: "10000"
        volumeMounts:
        - name: kafka-data
          mountPath: /var/lib/kafka  # must match log.dir above so the data is actually persisted
        readinessProbe:
          tcpSocket:
            port: 9092
          initialDelaySeconds: 30
          periodSeconds: 10
  volumeClaimTemplates:
  - metadata:
      name: kafka-data
    spec:
      accessModes: [ "ReadWriteOnce" ]
      storageClassName: "juicefs-sc"  # adjust for your cluster
      resources:
        requests:
          storage: 8Gi
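Apply the manifest and wait for all three brokers to become Ready, then run a quick smoke test from inside a broker Pod. The kafka-topics.sh path follows the /opt/kafka/bin layout used in the start command above; treat this as a sketch rather than a guaranteed procedure:
root@ldap:/opt/k8s-server/kafka/kafka-test/test# kubectl apply -f kf.yaml
root@ldap:/opt/k8s-server/kafka/kafka-test/test# kubectl get pods -n boss -l app=kafka -w
# Create a test topic replicated across all three brokers
root@ldap:/opt/k8s-server/kafka/kafka-test/test# kubectl exec kafka-0 -n boss -- /opt/kafka/bin/kafka-topics.sh \
  --create --topic test --partitions 3 --replication-factor 3 --bootstrap-server localhost:9092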
7. Check the services
root@ldap:/opt/k8s-server/kafka/kafka-test/test# kubectl get pod,svc -n boss
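To confirm the cluster works end to end, a message can be produced and consumed through the console clients shipped with the image, reusing the test topic created above (the script paths are again assumed to live under /opt/kafka/bin):
# Producer: type a few lines, then Ctrl-C
root@ldap:/opt/k8s-server/kafka/kafka-test/test# kubectl exec -it kafka-0 -n boss -- /opt/kafka/bin/kafka-console-producer.sh \
  --topic test --bootstrap-server localhost:9092
# Consumer: should print the lines produced above
root@ldap:/opt/k8s-server/kafka/kafka-test/test# kubectl exec -it kafka-1 -n boss -- /opt/kafka/bin/kafka-console-consumer.sh \
  --topic test --from-beginning --bootstrap-server localhost:9092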