k8s Learning (29): Deploying kube-keepalived-vip in Kubernetes
In a Kubernetes cluster, a NodePort Service allocates the same port on every node and proxies traffic arriving on that port to the Service's endpoints. On its own this does not provide high availability: clients must know the IP address of a running node in advance, and when a node fails its Pods are rescheduled onto other nodes. This is where IPVS and keepalived can help. The idea is to define a single virtual IP (VIP) per Service and use it to expose the Service outside the Kubernetes cluster; here this is implemented with kube-keepalived-vip.
1. Prepare the kube-keepalived-vip image
docker pull aledbf/kube-keepalived-vip:0.35
docker save -o kube-keepalived-vip.tar aledbf/kube-keepalived-vip:0.35
Copy kube-keepalived-vip.tar to the server, then load it and push it to the private registry:
docker load -i kube-keepalived-vip.tar
docker tag aledbf/kube-keepalived-vip:0.35 192.168.100.91:80/kube-keepalived-vip:0.35
docker push 192.168.100.91:80/kube-keepalived-vip:0.35
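If the registry at 192.168.100.91:80 is a plain Docker Registry v2 (an assumption, since the article does not say which registry product is used), the push can be verified through its HTTP API:
curl http://192.168.100.91:80/v2/_catalog
curl http://192.168.100.91:80/v2/kube-keepalived-vip/tags/list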
2. Write vip-rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: kube-keepalived-vip
rules:
- apiGroups: [""]
  resources:
  - pods
  - nodes
  - endpoints
  - services
  - configmaps
  verbs: ["get", "list", "watch"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-keepalived-vip
  namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: kube-keepalived-vip
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kube-keepalived-vip
subjects:
- kind: ServiceAccount
  name: kube-keepalived-vip
  namespace: default
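This grants the controller read-only access to the resources it watches. Once the manifest has been applied (step 6), the service account's permissions can be spot-checked with kubectl auth can-i, for example:
kubectl auth can-i list endpoints --as=system:serviceaccount:default:kube-keepalived-vip
kubectl auth can-i watch services --as=system:serviceaccount:default:kube-keepalived-vip
Both commands should answer yes.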
3. Write a sample Nginx service, nginx-demo.yaml
apiVersion: apps/v1beta1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 3
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.7.9
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  type: NodePort
  ports:
  - port: 80
    nodePort: 30302
    targetPort: 80
    protocol: TCP
    name: http
  selector:
    app: nginx
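Before layering the VIP on top, it is worth confirming that the demo workload answers on its NodePort once applied; a minimal check, where <node-ip> stands for any cluster node address:
kubectl get pods -l app=nginx -o wide
kubectl get svc nginx
curl http://<node-ip>:30302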
4. Write configmap-vip.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: vip-configmap
data:
  192.168.100.86: default/nginx
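Each entry under data maps an external VIP (the key) to the Service it should load-balance, written as namespace/serviceName. More Services can be exposed by adding more entries; the second entry below is only a hypothetical illustration:
data:
  192.168.100.86: default/nginx
  192.168.100.87: default/another-service   # hypothetical second VIP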
5. Write vip-daemonset.yaml
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: kube-keepalived-vip
spec:
  template:
    metadata:
      labels:
        name: kube-keepalived-vip
    spec:
      hostNetwork: true
      serviceAccount: kube-keepalived-vip
      containers:
      - image: 192.168.100.91:80/kube-keepalived-vip:0.35
        name: kube-keepalived-vip
        imagePullPolicy: IfNotPresent
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
          initialDelaySeconds: 15
          timeoutSeconds: 3
        securityContext:
          privileged: true
        volumeMounts:
        - mountPath: /lib/modules
          name: modules
          readOnly: true
        - mountPath: /dev
          name: dev
        # use downward API
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        # to use unicast
        args:
        - --services-configmap=default/vip-configmap
        # unicast uses the ip of the nodes instead of multicast
        # this is useful if running in cloud providers (like AWS)
        #- --use-unicast=true
      volumes:
      - name: modules
        hostPath:
          path: /lib/modules
      - name: dev
        hostPath:
          path: /dev
      #nodeSelector:
      #  type: worker
6. Apply the YAML files
kubectl apply -f nginx-demo.yaml
kubectl apply -f vip-rbac.yaml
kubectl apply -f configmap-vip.yaml
kubectl apply -f vip-daemonset.yaml
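The DaemonSet should now be running one kube-keepalived-vip pod per node. Listing the pods also gives the pod name needed for the inspection in the next step (kube-keepalived-vip-768px in this walkthrough):
kubectl get ds kube-keepalived-vip
kubectl get pods -l name=kube-keepalived-vip -o wide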
7. Check the generated keepalived configuration
[root@k8s-master keepaliaved-vip]# kubectl exec kube-keepalived-vip-768px cat /etc/keepalived/keepalived.conf
global_defs {
  vrrp_version 3
  vrrp_iptables KUBE-KEEPALIVED-VIP
}

#Check if the VIP list is empty
vrrp_instance vips {
  state BACKUP
  interface ens33
  virtual_router_id 50
  priority 101
  nopreempt
  advert_int 1

  track_interface {
    ens33
  }

  virtual_ipaddress {
    192.168.100.86
  }

  notify /keepalived-check.sh
}

# Service: default-nginx
virtual_server 192.168.100.86 80 {
  delay_loop 5
  lvs_sched wlc
  lvs_method NAT
  persistence_timeout 1800
  protocol TCP

  real_server 10.244.1.57 80 {
    weight 1
    TCP_CHECK {
      connect_port 80
      connect_timeout 3
    }
  }

  real_server 10.244.2.70 80 {
    weight 1
    TCP_CHECK {
      connect_port 80
      connect_timeout 3
    }
  }

  real_server 10.244.3.62 80 {
    weight 1
    TCP_CHECK {
      connect_port 80
      connect_timeout 3
    }
  }
}

#End if vip list is empty
[root@k8s-master keepaliaved-vip]#
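On the node whose keepalived instance currently holds MASTER state, the VIP should be bound to the tracked interface and the LVS virtual server should be programmed in the kernel. Assuming the interface is ens33 as in the config above and ipvsadm is installed on the node, this can be checked with:
ip addr show ens33        # 192.168.100.86 appears as a secondary address on the active node
ipvsadm -Ln               # lists virtual server 192.168.100.86:80 and its real servers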
8. Access test
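A minimal test is to curl the VIP from any host on the same network, then delete the keepalived pod that currently holds the address and confirm the VIP keeps answering after failover (the pod name is taken from the listing above):
curl http://192.168.100.86
kubectl delete pod kube-keepalived-vip-768px
curl http://192.168.100.86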