[Cloud Native | Kubernetes] Deploying Kubernetes (k8s) 1.25.2 with kubeadm on containerd
All nodes
Set hostnames
master node
hostnamectl set-hostname master
bash
node01 node
hostnamectl set-hostname node01
bash
node02 node
hostnamectl set-hostname node02
bash
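Optionally, verify on each node that the new name took effect:
hostnamectl --static   # should print master, node01, or node02 respectively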
Upgrade the kernel
# Refresh the yum repositories
yum update -y
# Import the ELRepo repository's public key
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
# Install the ELRepo yum repository
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
# Install the latest mainline kernel
yum --enablerepo=elrepo-kernel install -y kernel-ml
# List all kernels available to grub
sudo awk -F\' '$1=="menuentry " {print i++ " : " $2}' /etc/grub2.cfg
# Set the default entry, where 0 is the index of the new kernel from the listing above
grub2-set-default 0
# Regenerate the grub configuration file
grub2-mkconfig -o /boot/grub2/grub.cfg
# List every installed kernel package
rpm -qa | grep kernel
kernel-ml-6.0.0-1.el7.elrepo.x86_64
kernel-3.10.0-862.el7.x86_64
kernel-tools-libs-3.10.0-1160.76.1.el7.x86_64
kernel-3.10.0-1160.76.1.el7.x86_64
kernel-tools-3.10.0-1160.76.1.el7.x86_64
# Remove the old kernel RPMs; adjust the package names to match the output above
yum remove -y kernel-3.10.0-* kernel-tools-*
# Reboot into the new kernel
reboot
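After the reboot, confirm the machine came up on the new kernel:
uname -r
# expected to show the new kernel, e.g. 6.0.0-1.el7.elrepo.x86_64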
Add host mappings
echo 192.168.4.226 master >> /etc/hosts
echo 192.168.4.227 node01 >> /etc/hosts
echo 192.168.4.228 node02 >> /etc/hosts
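A quick sanity check that the mappings resolve (run on any node):
getent hosts master node01 node02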
Disable swap and the firewall, and keep them disabled across reboots
swapoff -a && sed -ri 's/.*swap.*/#&/' /etc/fstab
systemctl stop firewalld && systemctl disable firewalld
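Verify that both are really off:
swapon --show                  # no output means swap is disabled
systemctl is-active firewalld  # should print inactive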
Install base utilities
yum -y install wget net-tools sysstat
Install containerd
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum -y install containerd.io
Generate and adjust the default containerd configuration
containerd config default | tee /etc/containerd/config.toml
sed -i 's/sandbox_image = "registry.k8s.io\/pause:3.6"/sandbox_image = "registry.aliyuncs.com\/google_containers\/pause:3.6"/g' /etc/containerd/config.toml
sed -i 's/SystemdCgroup = false/SystemdCgroup = true/g' /etc/containerd/config.toml
# Keep the default runtime shim (io.containerd.runc.v2); SystemdCgroup = true only takes effect with the runc v2 shim, so do not switch it to io.containerd.runtime.v1.linux
Start containerd and enable it at boot
systemctl start containerd && systemctl enable containerd
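Optionally, confirm the config edits took effect and the service is healthy:
grep -E 'sandbox_image|SystemdCgroup' /etc/containerd/config.toml
systemctl is-active containerd   # should print active
ctr version                      # client and server versions should both respond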
Enable IP forwarding and load the required kernel modules
cat <<EOF | tee /etc/modules-load.d/kubernetes1.25.conf
overlay
br_netfilter
EOF
cat <<EOF | tee /etc/sysctl.d/kubernetes1.25-forsys.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
modprobe br_netfilter
modprobe overlay
sysctl --system
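Confirm the modules are loaded and the sysctls took effect:
lsmod | grep -E 'overlay|br_netfilter'
sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables   # both should print 1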
Install kubelet, kubeadm, and kubectl from the Aliyun mirror
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum install -y --nogpgcheck kubelet-1.25.2-0 kubeadm-1.25.2-0 kubectl-1.25.2-0
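Confirm the pinned versions installed:
kubeadm version -o short   # should print v1.25.2
kubelet --version          # should print Kubernetes v1.25.2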
Start kubelet and enable it at boot (it will restart in a loop until kubeadm initializes the node; that is expected)
systemctl start kubelet && systemctl enable kubelet
Point crictl at the containerd socket
crictl config runtime-endpoint unix:///run/containerd/containerd.sock
sed -i 's/image-endpoint: ""/image-endpoint: "unix:\/\/\/run\/containerd\/containerd.sock"/g' /etc/crictl.yaml
systemctl daemon-reload
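crictl should now talk to containerd; the container list will simply be empty until the cluster is up:
crictl ps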
master node
Generate the cluster initialization config
kubeadm config print init-defaults > init.yaml
sed -i 's/1.2.3.4/192.168.4.226/g' init.yaml
sed -i 's/name: node/name: master/g' init.yaml
sed -i 's/registry.k8s.io/registry.aliyuncs.com\/google_containers/g' init.yaml
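For reference, after the edits the relevant fields of init.yaml should look roughly like this (an excerpt, not the full file; the generated defaults are otherwise left alone):
localAPIEndpoint:
  advertiseAddress: 192.168.4.226
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock
  name: master
imageRepository: registry.aliyuncs.com/google_containers
The generated file typically pins kubernetesVersion: 1.25.0; if so, it can be bumped to match the installed packages, and the control-plane images pre-pulled so that init fails fast on any registry problem:
sed -i 's/kubernetesVersion: 1.25.0/kubernetesVersion: 1.25.2/' init.yaml
kubeadm config images pull --config=init.yaml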
Initialize the Kubernetes cluster with kubeadm
kubeadm init --config=init.yaml
Set up the admin kubeconfig
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
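kubectl now works against the cluster; the master will report NotReady until a network plugin (Calico, below) is deployed, roughly:
kubectl get nodes
# NAME     STATUS     ROLES           AGE   VERSION
# master   NotReady   control-plane   1m    v1.25.2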
Create a join token that never expires
kubeadm token create --ttl 0 --print-join-command
kubeadm join 192.168.4.226:6443 --token z9wthy.26kbpxrwgxkh15we --discovery-token-ca-cert-hash sha256:a1697cc770ff98293ee67479cda4ce82bcd8ef0c0fc110bc1eab80bd91efa26d
worker nodes
Join the worker nodes to the cluster
kubeadm join 192.168.4.226:6443 --token z9wthy.26kbpxrwgxkh15we --discovery-token-ca-cert-hash sha256:a1697cc770ff98293ee67479cda4ce82bcd8ef0c0fc110bc1eab80bd91efa26d
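Back on the master, confirm both workers have registered (they too stay NotReady until the network plugin is up):
kubectl get nodes -o wide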
Deploy add-ons
Deploy the Calico network plugin
wget https://raw.githubusercontent.com/projectcalico/calico/v3.24.1/manifests/calico.yaml
kubectl apply -f calico.yaml
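The calico-node pods can take a few minutes to pull and start; watch them come up and the nodes flip to Ready (the manifest above installs them into kube-system):
kubectl -n kube-system get pods -l k8s-app=calico-node -w
kubectl get nodes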
Deploy the web UI add-ons
Dashboard
Create the Dashboard YAML file
Official site: https://github.com/kubernetes/dashboard#kubernetes-dashboard
wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.7.0/aio/deploy/recommended.yaml
or
cat << 'EOF' | tee dashboard.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: Namespace
metadata:
name: kubernetes-dashboard
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
ports:
- port: 443
targetPort: 8443
selector:
k8s-app: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
data:
csrf: ""
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque
---
kind: ConfigMap
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-settings
namespace: kubernetes-dashboard
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
rules:
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
rules:
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
securityContext:
seccompProfile:
type: RuntimeDefault
containers:
- name: kubernetes-dashboard
image: kubernetesui/dashboard:v2.7.0
imagePullPolicy: Always
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
- --namespace=kubernetes-dashboard
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
ports:
- port: 8000
targetPort: 8000
selector:
k8s-app: dashboard-metrics-scraper
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: dashboard-metrics-scraper
template:
metadata:
labels:
k8s-app: dashboard-metrics-scraper
spec:
securityContext:
seccompProfile:
type: RuntimeDefault
containers:
- name: dashboard-metrics-scraper
image: kubernetesui/metrics-scraper:v1.0.8
ports:
- containerPort: 8000
protocol: TCP
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 8000
initialDelaySeconds: 30
timeoutSeconds: 30
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
volumes:
- name: tmp-volume
emptyDir: {}
EOF
Deploy the Dashboard components
kubectl apply -f dashboard.yaml
kubectl get all -n kubernetes-dashboard
NAME READY STATUS RESTARTS AGE
pod/dashboard-metrics-scraper-64bcc67c9c-l7z8g 1/1 Running 0 58m
pod/kubernetes-dashboard-5c8bd6b59-8ztjp 1/1 Running 0 58m
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/dashboard-metrics-scraper ClusterIP 10.97.148.63 <none> 8000/TCP 58m
service/kubernetes-dashboard ClusterIP 10.108.14.195 <none> 443/TCP 58m
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/dashboard-metrics-scraper 1/1 1 1 58m
deployment.apps/kubernetes-dashboard 1/1 1 1 58m
NAME DESIRED CURRENT READY AGE
replicaset.apps/dashboard-metrics-scraper-64bcc67c9c 1 1 1 58m
replicaset.apps/kubernetes-dashboard-5c8bd6b59 1 1 1 58m
Change the Dashboard Service type to NodePort
kubectl patch svc kubernetes-dashboard -p '{"spec":{"type":"NodePort"}}' -n kubernetes-dashboard
Create a Dashboard access token
cat <<EOF | tee dashboard-token.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
namespace: kubernetes-dashboard
EOF
kubectl apply -f dashboard-token.yaml
kubectl -n kubernetes-dashboard create token admin-user
eyJhbGciOiJSUzI1NiIsImtpZCI6ImFqYmF3UXo3UHRnTW02ZUNVSGZSNnRreVBuX2JQYzZvRU92WFFmLTA5ZGsifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiXSwiZXhwIjoxNjY1NDcwMDQzLCJpYXQiOjE2NjU0NjY0NDMsImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJhZG1pbi11c2VyIiwidWlkIjoiMjdjODMwYTEtMjEwYy00MDExLWE4NjEtZmQ2YTQ2MmZjZGVlIn19LCJuYmYiOjE2NjU0NjY0NDMsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDphZG1pbi11c2VyIn0.3OPKWpPvSN7ygrZfryJHxu6Q7pJoNJyd_BN3ME7fhc9cM9v9sRbL5y3tue0ehvv-9kLuMAX8OXlRI4_zpYy3vpTmslzkRePKCoTTblxxi0Xn_etEEguhO30QAsYGTmwCRPp3sOWF9mBNYsI0KO4RbcFRcbeFfsm2WT3flpGAdYh3sqCoQjABKkjYTE4rkp9EZHfqklgt4pHiRG1M7d1RGq2Oq2OkHSzgnpBoGdGMl8AWylnCBE8wUZExSflx1iEDPwM6mtE9P1z9l_9W-BBOHb5BWa-2mEj0NH25C1zf67e1kkUHm7SZIxlP_KW40xAsCRAjw7SbLE3NO86mXLdSQQ
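Tokens created this way expire after one hour by default. A longer-lived token can be requested explicitly (8760h below is an arbitrary example value; the API server may cap the maximum duration):
kubectl -n kubernetes-dashboard create token admin-user --duration=8760h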
Access the Dashboard
kubectl get pods,svc -n kubernetes-dashboard
NAME READY STATUS RESTARTS AGE
pod/dashboard-metrics-scraper-64bcc67c9c-l7z8g 1/1 Running 0 29m
pod/kubernetes-dashboard-5c8bd6b59-8ztjp 1/1 Running 0 29m
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/dashboard-metrics-scraper ClusterIP 10.97.148.63 <none> 8000/TCP 29m
service/kubernetes-dashboard NodePort 10.108.14.195 <none> 443:32573/TCP 29m
Open https://<node-IP>:32573 in a browser (the NodePort shown above), proceed past the self-signed certificate warning, and log in with a token created by:
kubectl -n kubernetes-dashboard create token admin-user
Weave Scope
Create the Weave Scope YAML file
Official site: https://www.weave.works/docs/scope/latest/installing/#k8s
wget https://github.com/weaveworks/scope/releases/download/v1.13.2/k8s-scope.yaml
or
cat << 'EOF' | tee k8s-scope.yaml
apiVersion: v1
kind: List
items:
- apiVersion: v1
kind: Namespace
metadata:
name: weave
- apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
name: weave-scope
name: weave-scope
namespace: weave
rules:
- apiGroups:
- ""
resources:
- pods
- pods/log
- replicationcontrollers
- services
- namespaces
- persistentvolumes
- persistentvolumeclaims
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- pods
verbs:
- delete
- apiGroups:
- apps
resources:
- deployments
- statefulsets
- daemonsets
verbs:
- get
- list
- watch
- apiGroups:
- batch
resources:
- cronjobs
- jobs
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- daemonsets
- deployments
- deployments/scale
- replicasets
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- deployments/scale
verbs:
- update
- apiGroups:
- apps
resources:
- deployments/scale
verbs:
- get
- update
- apiGroups:
- storage.k8s.io
resources:
- storageclasses
verbs:
- list
- watch
- apiGroups:
- extensions
resourceNames:
- weave-scope
resources:
- podsecuritypolicies
verbs:
- use
- apiGroups:
- volumesnapshot.external-storage.k8s.io
resources:
- volumesnapshots
- volumesnapshotdatas
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- update
- patch
- apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: weave-scope
labels:
name: weave-scope
namespace: weave
roleRef:
kind: ClusterRole
name: weave-scope
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: weave-scope
namespace: weave
- apiVersion: apps/v1
kind: Deployment
metadata:
name: weave-scope-app
labels:
name: weave-scope-app
app: weave-scope
weave-cloud-component: scope
weave-scope-component: app
namespace: weave
spec:
replicas: 1
selector:
matchLabels:
app: weave-scope
revisionHistoryLimit: 2
template:
metadata:
labels:
name: weave-scope-app
app: weave-scope
weave-cloud-component: scope
weave-scope-component: app
spec:
containers:
- name: app
args:
- '--no-probe'
env: [ ]
image: weaveworks/scope:1.13.2
imagePullPolicy: IfNotPresent
ports:
- containerPort: 4040
protocol: TCP
resources:
requests:
cpu: 200m
memory: 200Mi
- apiVersion: apps/v1
kind: DaemonSet
metadata:
name: weave-scope-agent
labels:
name: weave-scope-agent
app: weave-scope
weave-cloud-component: scope
weave-scope-component: agent
namespace: weave
spec:
minReadySeconds: 5
selector:
matchLabels:
app: weave-scope
template:
metadata:
labels:
name: weave-scope-agent
app: weave-scope
weave-cloud-component: scope
weave-scope-component: agent
spec:
containers:
- name: scope-agent
args:
- '--mode=probe'
- '--probe-only'
- '--probe.kubernetes.role=host'
- '--probe.docker.bridge=docker0'
- '--probe.docker=true'
- 'weave-scope-app.weave.svc.cluster.local.:80'
env:
- name: SCOPE_HOSTNAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
image: weaveworks/scope:1.13.2
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 100m
memory: 100Mi
securityContext:
privileged: true
volumeMounts:
- name: docker-socket
mountPath: /var/run/docker.sock
- name: scope-plugins
mountPath: /var/run/scope/plugins
- name: sys-kernel-debug
mountPath: /sys/kernel/debug
dnsPolicy: ClusterFirstWithHostNet
hostNetwork: true
hostPID: true
tolerations:
- effect: NoSchedule
operator: Exists
volumes:
- name: docker-socket
hostPath:
path: /var/run/docker.sock
- name: scope-plugins
hostPath:
path: /var/run/scope/plugins
- name: sys-kernel-debug
hostPath:
path: /sys/kernel/debug
updateStrategy:
rollingUpdate:
maxUnavailable: 1
- apiVersion: apps/v1
kind: Deployment
metadata:
name: weave-scope-cluster-agent
labels:
name: weave-scope-cluster-agent
app: weave-scope
weave-cloud-component: scope
weave-scope-component: cluster-agent
namespace: weave
spec:
replicas: 1
selector:
matchLabels:
name: weave-scope-cluster-agent
app: weave-scope
weave-cloud-component: scope
weave-scope-component: cluster-agent
revisionHistoryLimit: 2
template:
metadata:
labels:
name: weave-scope-cluster-agent
app: weave-scope
weave-cloud-component: scope
weave-scope-component: cluster-agent
spec:
containers:
- name: scope-cluster-agent
args:
- '--mode=probe'
- '--probe-only'
- '--probe.kubernetes.role=cluster'
- 'weave-scope-app.weave.svc.cluster.local.:80'
command:
- /home/weave/scope
image: 'docker.io/weaveworks/scope:1.13.2'
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 100m
memory: 100Mi
volumeMounts:
- name: scope-plugins
mountPath: /var/run/scope/plugins
serviceAccountName: weave-scope
volumes:
- name: scope-plugins
hostPath:
path: /var/run/scope/plugins
- apiVersion: v1
kind: ServiceAccount
metadata:
name: weave-scope
labels:
name: weave-scope
namespace: weave
- apiVersion: v1
kind: Service
metadata:
name: weave-scope-app
labels:
name: weave-scope-app
app: weave-scope
weave-cloud-component: scope
weave-scope-component: app
namespace: weave
spec:
ports:
- name: app
port: 80
protocol: TCP
targetPort: 4040
selector:
name: weave-scope-app
app: weave-scope
weave-cloud-component: scope
weave-scope-component: app
EOF
Deploy the Weave Scope components
kubectl apply -f k8s-scope.yaml
kubectl get all -n weave
NAME READY STATUS RESTARTS AGE
pod/weave-scope-agent-5xdnj 1/1 Running 0 12m
pod/weave-scope-agent-7xjf7 1/1 Running 0 12m
pod/weave-scope-agent-wxnsm 1/1 Running 0 12m
pod/weave-scope-app-8ccc4d754-bnj5c 1/1 Running 0 12m
pod/weave-scope-cluster-agent-59cc85cbcc-2wd8s 1/1 Running 0 12m
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/weave-scope-app ClusterIP 10.101.155.69 <none> 80/TCP 12m
NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE
daemonset.apps/weave-scope-agent 3 3 3 3 3 <none> 12m
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/weave-scope-app 1/1 1 1 12m
deployment.apps/weave-scope-cluster-agent 1/1 1 1 12m
NAME DESIRED CURRENT READY AGE
replicaset.apps/weave-scope-app-8ccc4d754 1 1 1 12m
replicaset.apps/weave-scope-cluster-agent-59cc85cbcc 1 1 1 12m
Change the Weave Scope Service type to NodePort
kubectl patch svc weave-scope-app -p '{"spec":{"type":"NodePort"}}' -n weave
Access Weave Scope
kubectl get pods,svc -n weave
NAME READY STATUS RESTARTS AGE
pod/weave-scope-agent-5xdnj 1/1 Running 0 14m
pod/weave-scope-agent-7xjf7 1/1 Running 0 14m
pod/weave-scope-agent-wxnsm 1/1 Running 0 14m
pod/weave-scope-app-8ccc4d754-bnj5c 1/1 Running 0 14m
pod/weave-scope-cluster-agent-59cc85cbcc-2wd8s 1/1 Running 0 14m
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/weave-scope-app NodePort 10.101.155.69 <none> 80:32396/TCP 14m
Open http://<node-IP>:32396 in a browser (the NodePort shown above).
Deploy Metrics Server
Create the Metrics Server YAML file
cat << 'EOF' | tee metrics-server.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-app: metrics-server
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-view: "true"
name: system:aggregated-metrics-reader
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-app: metrics-server
name: system:metrics-server
rules:
- apiGroups:
- ""
resources:
- pods
- nodes
- nodes/stats
- namespaces
- configmaps
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: metrics-server
name: metrics-server-auth-reader
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-app: metrics-server
name: metrics-server:system:auth-delegator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-app: metrics-server
name: system:metrics-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:metrics-server
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
ports:
- name: https
port: 443
protocol: TCP
targetPort: https
selector:
k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: metrics-server
strategy:
rollingUpdate:
maxUnavailable: 0
template:
metadata:
labels:
k8s-app: metrics-server
spec:
containers:
- args:
- --cert-dir=/tmp
- --secure-port=4443
- --kubelet-insecure-tls
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --kubelet-use-node-status-port
image: bitnami/metrics-server:0.4.1
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /livez
port: https
scheme: HTTPS
periodSeconds: 10
name: metrics-server
ports:
- containerPort: 4443
name: https
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /readyz
port: https
scheme: HTTPS
periodSeconds: 10
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
volumeMounts:
- mountPath: /tmp
name: tmp-dir
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: metrics-server
volumes:
- emptyDir: {}
name: tmp-dir
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
labels:
k8s-app: metrics-server
name: v1beta1.metrics.k8s.io
spec:
group: metrics.k8s.io
groupPriorityMinimum: 100
insecureSkipTLSVerify: true
service:
name: metrics-server
namespace: kube-system
version: v1beta1
versionPriority: 100
EOF
Deploy the Metrics Server components
kubectl apply -f metrics-server.yaml
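Give the Deployment a minute, then confirm the pod is Running and the metrics API is available:
kubectl -n kube-system get pods -l k8s-app=metrics-server
kubectl get apiservice v1beta1.metrics.k8s.io   # AVAILABLE should become True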
Check node resource usage
kubectl top nodes
NAME CPU(cores) CPU% MEMORY(bytes) MEMORY%
master 124m 6% 1642Mi 43%
node01 674m 33% 1230Mi 32%
node02 221m 11% 1191Mi 31%
With Metrics Server running, the Dashboard UI also gains resource-usage graphs.
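Pod-level figures are available the same way:
kubectl top pods -A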