From 97d8928e80d6e35bf7c162096f19790857ba0bf8 Mon Sep 17 00:00:00 2001
From: tu <2843180578@qq.com>
Date: Fri, 7 Jun 2024 15:27:54 +0800
Subject: [PATCH] =?UTF-8?q?=E4=B8=8A=E4=BC=A0=E6=96=87=E4=BB=B6=E8=87=B3?=
=?UTF-8?q?=20kubernetes-MD?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
kubernetes-MD/利用kubernetes部署网站项目.md | 335 ++++++
kubernetes-MD/基于Kubernetes构建ES集群.md | 314 ++++++
.../基于kubeadm部署kubernetes集群.md | 217 ++++
.../基于kubeadm部署kubernetes集群1-25版本.md | 953 ++++++++++++++++++
.../基于kubernetes部署Prometheus和Grafana.md | 684 +++++++++++++
5 files changed, 2503 insertions(+)
create mode 100644 kubernetes-MD/利用kubernetes部署网站项目.md
create mode 100644 kubernetes-MD/基于Kubernetes构建ES集群.md
create mode 100644 kubernetes-MD/基于kubeadm部署kubernetes集群.md
create mode 100644 kubernetes-MD/基于kubeadm部署kubernetes集群1-25版本.md
create mode 100644 kubernetes-MD/基于kubernetes部署Prometheus和Grafana.md
diff --git a/kubernetes-MD/利用kubernetes部署网站项目.md b/kubernetes-MD/利用kubernetes部署网站项目.md
new file mode 100644
index 0000000..162a431
--- /dev/null
+++ b/kubernetes-MD/利用kubernetes部署网站项目.md
@@ -0,0 +1,335 @@
+
+Deploying a Website Project with Kubernetes
+
+Author: Xingdian <unauthorized reproduction will be prosecuted>
+
+------
+
+## I: Environment Preparation
+
+#### 1. Kubernetes Cluster
+
+Make sure the cluster is running normally, e.g. check with the following command:
+
+```shell
+[root@master ~]# kubectl get node
+NAME     STATUS   ROLES                  AGE     VERSION
+master   Ready    control-plane,master   5d19h   v1.23.1
+node-1   Ready    <none>                 5d19h   v1.23.1
+node-2   Ready    <none>                 5d19h   v1.23.1
+node-3   Ready    <none>                 5d19h   v1.23.1
+```
+
+#### 2. Harbor Private Registry
+
+Harbor provides image services for the Kubernetes cluster.
+
+
+## II: Project Deployment
+
+#### 1. Image Build
+
+Software download:
+
+```shell
+wget https://nginx.org/download/nginx-1.20.2.tar.gz
+```
+
+Project package download:
+
+```shell
+git clone https://github.com/blackmed/xingdian-project.git
+```
+
+Dockerfile for building the CentOS base image:
+
+```shell
+[root@nfs-harbor ~]# cat Dockerfile
+FROM daocloud.io/centos:7
+MAINTAINER "xingdianvip@gmail.com"
+ENV container docker
+RUN yum -y swap -- remove fakesystemd -- install systemd systemd-libs
+RUN yum -y update; yum clean all; \
+(cd /lib/systemd/system/sysinit.target.wants/; for i in *; do [ $i == systemd-tmpfiles-setup.service ] || rm -f $i; done); \
+rm -f /lib/systemd/system/multi-user.target.wants/*;\
+rm -f /etc/systemd/system/*.wants/*;\
+rm -f /lib/systemd/system/local-fs.target.wants/*; \
+rm -f /lib/systemd/system/sockets.target.wants/*udev*; \
+rm -f /lib/systemd/system/sockets.target.wants/*initctl*; \
+rm -f /lib/systemd/system/basic.target.wants/*;\
+rm -f /lib/systemd/system/anaconda.target.wants/*;
+VOLUME [ "/sys/fs/cgroup" ]
+CMD ["/usr/sbin/init"]
+[root@nfs-harbor ~]# docker build -t xingdian .
+```
+
+Build the project image:
+
+```shell
+[root@nfs-harbor nginx]# cat Dockerfile
+FROM xingdian
+ADD nginx-1.20.2.tar.gz /usr/local
+RUN rm -rf /etc/yum.repos.d/*
+COPY CentOS-Base.repo /etc/yum.repos.d/
+COPY epel.repo /etc/yum.repos.d/
+RUN yum clean all && yum makecache fast
+RUN yum -y install gcc gcc-c++ openssl openssl-devel pcre-devel zlib-devel make
+WORKDIR /usr/local/nginx-1.20.2
+RUN ./configure --prefix=/usr/local/nginx
+RUN make && make install
+WORKDIR /usr/local/nginx
+ENV PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/nginx/sbin
+EXPOSE 80
+RUN rm -rf /usr/local/nginx/conf/nginx.conf
+COPY nginx.conf /usr/local/nginx/conf/
+RUN mkdir /dist
+CMD ["nginx", "-g", "daemon off;"]
+[root@nfs-harbor nginx]# docker build -t nginx:v2 .
+```
+
+Note:
+
+    Prepare the CentOS Base repo and the EPEL repo files in advance.
+
+#### 2. Push the Image to Harbor
+
+Retag the image:
+
+```shell
+[root@nfs-harbor ~]# docker tag nginx:v2 10.0.0.230/xingdian/nginx:v2
+```
+
+Log in to the private registry:
+
+```shell
+[root@nfs-harbor ~]# docker login 10.0.0.230
+Username: xingdian
+Password:
+```
+
+Push the image:
+
+```shell
+[root@nfs-harbor ~]# docker push 10.0.0.230/xingdian/nginx:v2
+```
+
+Note:
+
+    Pushing uses HTTPS by default; since our Harbor deployment serves plain HTTP, apply the change described in step 3 below before pushing.
+
+#### 3. Connect the Kubernetes Cluster to Harbor
+
+Allow every node in the cluster to use the HTTP registry (Docker defaults to HTTPS):
+
+```shell
+[root@master ~]# vim /etc/systemd/system/multi-user.target.wants/docker.service
+ExecStart=/usr/bin/dockerd -H fd:// --insecure-registry 10.0.0.230 --containerd=/run/containerd/containerd.sock
+[root@master ~]# systemctl daemon-reload
+[root@master ~]# systemctl restart docker
+```
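+
+Equivalently (a minimal sketch, not taken from the original steps), the registry can be allowed through /etc/docker/daemon.json; the address must match the Harbor host used above:
+
+```shell
+[root@master ~]# cat /etc/docker/daemon.json
+{
+  "insecure-registries": ["10.0.0.230"]
+}
+[root@master ~]# systemctl restart docker
+```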
+
+Create a secret in the cluster for authenticating to Harbor:
+
+```shell
+[root@master ~]# kubectl create secret docker-registry regcred --docker-server=10.0.0.230 --docker-username=diange --docker-password=QianFeng@123
+[root@master ~]# kubectl get secret
+NAME      TYPE                             DATA   AGE
+regcred   kubernetes.io/dockerconfigjson   1      19h
+```
+
+Note:
+
+    regcred: the name of the secret
+
+    --docker-server: the registry address
+
+    --docker-username: the Harbor user
+
+    --docker-password: the Harbor password
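+
+The secret only takes effect when a workload references it. A minimal sketch of referencing regcred from a pod template (add the same field under the StatefulSet template spec later in this document if pulls require authentication):
+
+```shell
+spec:
+  imagePullSecrets:
+  - name: regcred
+  containers:
+  - name: nginx
+    image: 10.0.0.230/xingdian/nginx:v2
+```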
+
+#### 4. Deploy NFS
+
+NFS provides persistent storage for the Kubernetes cluster. Install nfs-utils on the cluster nodes as well, so they can mount NFS filesystems.
+
+```shell
+[root@nfs-harbor ~]# yum -y install nfs-utils
+[root@nfs-harbor ~]# systemctl start nfs
+[root@nfs-harbor ~]# systemctl enable nfs
+```
+
+Create the shared directory and export it:
+
+```shell
+[root@nfs-harbor ~]# mkdir /kubernetes-1
+[root@nfs-harbor ~]# cat /etc/exports
+/kubernetes-1 *(rw,no_root_squash,sync)
+[root@nfs-harbor ~]# exportfs -rv
+```
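+
+A quick check that the export is visible from a cluster node (assumes nfs-utils is installed there):
+
+```shell
+[root@master ~]# showmount -e 10.0.0.230
+Export list for 10.0.0.230:
+/kubernetes-1 *
+```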
+
+Put the project files into the shared directory:
+
+```shell
+[root@nfs-harbor ~]# git clone https://github.com/blackmed/xingdian-project.git
+[root@nfs-harbor ~]# unzip dist.zip
+[root@nfs-harbor ~]# cp -r dist/* /kubernetes-1
+```
+
+#### 5. Deploy the Project with a StatefulSet
+
+Besides the StatefulSet, this YAML file also contains a Service, two PersistentVolumes, and a StorageClass:
+
+```shell
+[root@master xingdian]# cat Statefulset.yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: nginx
+  labels:
+    app: nginx
+spec:
+  type: NodePort
+  ports:
+  - port: 80
+    name: web
+    targetPort: 80
+    nodePort: 30010
+  selector:
+    app: nginx
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: xingdian
+provisioner: example.com/external-nfs
+parameters:
+  server: 10.0.0.230
+  path: /kubernetes-1
+  readOnly: "false"
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: xingdian-1
+spec:
+  capacity:
+    storage: 1Gi
+  volumeMode: Filesystem
+  accessModes:
+  - ReadWriteOnce
+  storageClassName: xingdian
+  nfs:
+    path: /kubernetes-1
+    server: 10.0.0.230
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: xingdian-2
+spec:
+  capacity:
+    storage: 1Gi
+  volumeMode: Filesystem
+  accessModes:
+  - ReadWriteOnce
+  storageClassName: xingdian
+  nfs:
+    path: /kubernetes-1
+    server: 10.0.0.230
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: web
+spec:
+  selector:
+    matchLabels:
+      app: nginx
+  serviceName: "nginx"
+  replicas: 2
+  template:
+    metadata:
+      labels:
+        app: nginx
+    spec:
+      terminationGracePeriodSeconds: 10
+      containers:
+      - name: nginx
+        image: 10.0.0.230/xingdian/nginx:v2
+        ports:
+        - containerPort: 80
+          name: web
+        volumeMounts:
+        - name: www
+          mountPath: /dist
+  volumeClaimTemplates:
+  - metadata:
+      name: www
+    spec:
+      accessModes: [ "ReadWriteOnce" ]
+      storageClassName: "xingdian"
+      resources:
+        requests:
+          storage: 1Gi
+```
+
+#### 6. Run
+
+```shell
+[root@master xingdian]# kubectl create -f Statefulset.yaml
+service/nginx created
+storageclass.storage.k8s.io/xingdian created
+persistentvolume/xingdian-1 created
+persistentvolume/xingdian-2 created
+statefulset.apps/web created
+```
+
+## III: Project Verification
+
+#### 1. PV Verification
+
+```shell
+[root@master xingdian]# kubectl get pv
+NAME         CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM               STORAGECLASS   REASON   AGE
+xingdian-1   1Gi        RWO            Retain           Bound    default/www-web-1   xingdian                9m59s
+xingdian-2   1Gi        RWO            Retain           Bound    default/www-web-0   xingdian                9m59s
+```
+
+#### 2. PVC Verification
+
+```shell
+[root@master xingdian]# kubectl get pvc
+NAME        STATUS   VOLUME       CAPACITY   ACCESS MODES   STORAGECLASS   AGE
+www-web-0   Bound    xingdian-2   1Gi        RWO            xingdian       10m
+www-web-1   Bound    xingdian-1   1Gi        RWO            xingdian       10m
+```
+
+#### 3. StorageClass Verification
+
+```shell
+[root@master xingdian]# kubectl get storageclass
+NAME       PROVISIONER                RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
+xingdian   example.com/external-nfs   Delete          Immediate           false                  10m
+```
+
+#### 4. StatefulSet Verification
+
+```shell
+[root@master xingdian]# kubectl get statefulset
+NAME   READY   AGE
+web    2/2     13m
+[root@master xingdian]# kubectl get pod
+NAME    READY   STATUS    RESTARTS   AGE
+web-0   1/1     Running   0          13m
+web-1   1/1     Running   0          13m
+```
+
+#### 5. Service Verification
+
+```shell
+[root@master xingdian]# kubectl get svc
+NAME    TYPE       CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
+nginx   NodePort   10.111.189.32   <none>        80:30010/TCP   13m
+```
+
+#### 6. Browser Access
+
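+The site should now be reachable at any node IP on the NodePort (30010 in the Service above); a quick check:
+
+```shell
+[root@master xingdian]# curl -I http://10.0.0.220:30010/
+```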
+
diff --git a/kubernetes-MD/基于Kubernetes构建ES集群.md b/kubernetes-MD/基于Kubernetes构建ES集群.md
new file mode 100644
index 0000000..7a91dce
--- /dev/null
+++ b/kubernetes-MD/基于Kubernetes构建ES集群.md
@@ -0,0 +1,314 @@
+Building an ES Cluster on Kubernetes
+
+Author: Xingdian (unauthorized reproduction will be prosecuted)
+
+------
+
+## I: Environment Preparation
+
+#### 1. Kubernetes Cluster Environment
+
+|       Node        |   Address   |
+| :---------------: | :---------: |
+| Kubernetes-Master | 10.9.12.206 |
+| Kubernetes-Node-1 | 10.9.12.205 |
+| Kubernetes-Node-2 | 10.9.12.204 |
+| Kubernetes-Node-3 | 10.9.12.203 |
+| DNS Server        | 10.9.12.210 |
+| Proxy Server      | 10.9.12.209 |
+| NFS Storage       | 10.9.12.250 |
+
+#### 2. Kuboard Cluster Management
+
+![image-20240420164922730](https://diandiange.oss-cn-beijing.aliyuncs.com/image-20240420164922730.png)
+
+## II: Building the ES Cluster
+
+#### 1. Persistent Storage Setup
+
+1. NFS server deployment
+
+    Omitted.
+
+2. Create the shared directory
+
+    This time a script is used; it is shown below.
+
+```shell
+[root@xingdiancloud-1 ~]# cat nfs.sh
+#!/bin/bash
+read -p "Enter the shared directory to create: " dir
+if [ -d $dir ];then
+    echo "Directory exists; enter a different shared directory: "
+    read again_dir
+    mkdir $again_dir -p
+    echo "Shared directory created"
+    read -p "Enter the clients to share with: " ips
+    echo "$again_dir ${ips}(rw,sync,no_root_squash)" >> /etc/exports
+    xingdian=`cat /etc/exports |grep "$again_dir" |wc -l`
+    if [ $xingdian -eq 1 ];then
+        echo "Export configured successfully"
+        exportfs -rv >/dev/null
+        exit
+    else
+        exit
+    fi
+else
+    mkdir $dir -p
+    echo "Shared directory created"
+    read -p "Enter the clients to share with: " ips
+    echo "$dir ${ips}(rw,sync,no_root_squash)" >> /etc/exports
+    xingdian=`cat /etc/exports |grep "$dir" |wc -l`
+    if [ $xingdian -eq 1 ];then
+        echo "Export configured successfully"
+        exportfs -rv >/dev/null
+        exit
+    else
+        exit
+    fi
+fi
+```
+
+3. Create the StorageClass
+
+```yaml
+[root@xingdiancloud-master ~]# vim namespace.yaml
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: logging
+[root@xingdiancloud-master ~]# vim storageclass.yaml
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  annotations:
+    k8s.kuboard.cn/storageNamespace: logging
+    k8s.kuboard.cn/storageType: nfs_client_provisioner
+  name: data-es
+parameters:
+  archiveOnDelete: 'false'
+provisioner: nfs-data-es
+reclaimPolicy: Retain
+volumeBindingMode: Immediate
+```
+
+4. Create the PersistentVolume
+
+```yaml
+[root@xingdiancloud-master ~]# vim persistentVolume.yaml
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  annotations:
+    pv.kubernetes.io/bound-by-controller: 'yes'
+  finalizers:
+  - kubernetes.io/pv-protection
+  name: nfs-pv-data-es
+spec:
+  accessModes:
+  - ReadWriteMany
+  capacity:
+    storage: 100Gi
+  claimRef:
+    apiVersion: v1
+    kind: PersistentVolumeClaim
+    name: nfs-pvc-data-es
+    namespace: kube-system
+  nfs:
+    path: /data/es-data
+    server: 10.9.12.250
+  persistentVolumeReclaimPolicy: Retain
+  storageClassName: nfs-storageclass-provisioner
+  volumeMode: Filesystem
+```
+
+Note: the StorageClass and PersistentVolume can also be created from the Kuboard UI.
+
+#### 2. Label the Nodes
+
+```shell
+[root@xingdiancloud-master ~]# kubectl label nodes xingdiancloud-node-1 es=log
+```
+
+Note:
+
+    Every node that will run ES needs this label.
+
+    It matches the nodeSelector used by the StatefulSet below; a quick check follows.
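+
+```shell
+[root@xingdiancloud-master ~]# kubectl get nodes -l es=log
+```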
+
+#### 3. ES Cluster Deployment
+
+    Note: each ES node needs a unique, stable network identity plus persistent storage. A Deployment only suits stateless applications and cannot provide this, so a StatefulSet is used here.
+
+```yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: es
+  namespace: logging
+spec:
+  serviceName: elasticsearch
+  replicas: 3
+  selector:
+    matchLabels:
+      app: elasticsearch
+  template:
+    metadata:
+      labels:
+        app: elasticsearch
+    spec:
+      nodeSelector:
+        es: log
+      initContainers:
+      - name: increase-vm-max-map
+        image: busybox
+        command: ["sysctl", "-w", "vm.max_map_count=262144"]
+        securityContext:
+          privileged: true
+      - name: increase-fd-ulimit
+        image: busybox
+        command: ["sh", "-c", "ulimit -n 65536"]
+        securityContext:
+          privileged: true
+      containers:
+      - name: elasticsearch
+        image: docker.elastic.co/elasticsearch/elasticsearch:7.6.2
+        ports:
+        - name: rest
+          containerPort: 9200
+        - name: inter
+          containerPort: 9300
+        resources:
+          limits:
+            cpu: 500m
+            memory: 4000Mi
+          requests:
+            cpu: 500m
+            memory: 3000Mi
+        volumeMounts:
+        - name: data
+          mountPath: /usr/share/elasticsearch/data
+        env:
+        - name: cluster.name
+          value: k8s-logs
+        - name: node.name
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        - name: cluster.initial_master_nodes
+          value: "es-0,es-1,es-2"
+        - name: discovery.zen.minimum_master_nodes
+          value: "2"
+        - name: discovery.seed_hosts
+          value: "elasticsearch"
+        - name: ES_JAVA_OPTS
+          value: "-Xms512m -Xmx512m"
+        - name: network.host
+          value: "0.0.0.0"
+        - name: node.max_local_storage_nodes
+          value: "3"
+  volumeClaimTemplates:
+  - metadata:
+      name: data
+      labels:
+        app: elasticsearch
+    spec:
+      accessModes: [ "ReadWriteMany" ]
+      storageClassName: data-es
+      resources:
+        requests:
+          storage: 25Gi
+```
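+
+After applying the manifest, the pods and their claims can be watched coming up (a sketch; names follow the StatefulSet above):
+
+```shell
+[root@xingdiancloud-master ~]# kubectl get pods -n logging
+[root@xingdiancloud-master ~]# kubectl get pvc -n logging
+```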
+
+#### 4. Create a Service to Expose the ES Cluster
+
+```yaml
+[root@xingdiancloud-master ~]# vim elasticsearch-svc.yaml
+kind: Service
+apiVersion: v1
+metadata:
+  name: elasticsearch
+  namespace: logging
+  labels:
+    app: elasticsearch
+spec:
+  selector:
+    app: elasticsearch
+  type: NodePort
+  ports:
+  - port: 9200
+    targetPort: 9200
+    nodePort: 30010
+    name: rest
+  - port: 9300
+    name: inter-node
+```
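+
+Once the Service is up, cluster health can be checked through any node's NodePort (30010 above; the node address is taken from the environment table):
+
+```shell
+[root@xingdiancloud-master ~]# curl "http://10.9.12.205:30010/_cluster/health?pretty"
+```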
+
+#### 5. Access Test
+
+Note:
+
+    Access the cluster with the elasticvue browser plugin.
+
+    Cluster status is normal.
+
+    All cluster nodes are up.
+
+![image-20240420172247845](https://diandiange.oss-cn-beijing.aliyuncs.com/image-20240420172247845.png)
+
+## III: Proxy and DNS Configuration
+
+#### 1. Proxy Configuration
+
+Note:
+
+    Deployment omitted.
+
+    Nginx is used as the proxy here.
+
+    Create the user and password for basic access control yourself (htpasswd); a sketch follows the config.
+
+    The configuration file:
+
+```shell
+[root@proxy ~]# cat /etc/nginx/conf.d/elasticsearch.conf
+server {
+    listen 80;
+    server_name es.xingdian.com;
+    location / {
+        auth_basic "xingdiancloud kibana";
+        auth_basic_user_file /etc/nginx/pass;
+        proxy_pass http://<address>:<port>;
+    }
+}
+```
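+
+The password file referenced above can be created with htpasswd from httpd-tools (a sketch; pick your own user name):
+
+```shell
+[root@proxy ~]# yum -y install httpd-tools
+[root@proxy ~]# htpasswd -c /etc/nginx/pass xingdian
+```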
+
+#### 2. DNS Configuration
+
+Note:
+
+    Deployment omitted.
+
+    The configuration:
+
+```shell
+[root@www ~]# cat /var/named/xingdian.com.zone
+$TTL 1D
+@       IN SOA  @ rname.invalid. (
+                                0       ; serial
+                                1D      ; refresh
+                                1H      ; retry
+                                1W      ; expire
+                                3H )    ; minimum
+        NS      @
+        A       <DNS server address>
+es      A       <proxy address>
+        AAAA    ::1
+```
+
+#### 3. Access Test
+
+    Omitted.
\ No newline at end of file
diff --git a/kubernetes-MD/基于kubeadm部署kubernetes集群.md b/kubernetes-MD/基于kubeadm部署kubernetes集群.md
new file mode 100644
index 0000000..cc91289
--- /dev/null
+++ b/kubernetes-MD/基于kubeadm部署kubernetes集群.md
@@ -0,0 +1,217 @@
+Deploying a Kubernetes Cluster with kubeadm
+
+Author: Xingdian <unauthorized reproduction will be prosecuted>
+
+------
+
+## I: Environment Preparation
+
+Four servers: one master and three nodes; the master must have at least 2 CPU cores.
+
+|  Node  | IP Address |
+| :------: | :--------: |
+| master | 10.0.0.220 |
+| node-1 | 10.0.0.221 |
+| node-2 | 10.0.0.222 |
+| node-3 | 10.0.0.223 |
+
+#### 1. Disable the Firewall and SELinux on All Servers
+
+```shell
+[root@localhost ~]# systemctl stop firewalld
+[root@localhost ~]# systemctl disable firewalld
+[root@localhost ~]# setenforce 0
+[root@localhost ~]# sed -i '/^SELINUX=/c SELINUX=disabled' /etc/selinux/config
+[root@localhost ~]# swapoff -a                            # temporary
+[root@localhost ~]# sed -i 's/.*swap.*/#&/' /etc/fstab    # permanent
+Note:
+    Disable the swap partition on every server.
+    Run these commands on all nodes.
+```
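+
+A quick sanity check that SELinux and swap are really off:
+
+```shell
+[root@localhost ~]# getenforce
+Permissive
+[root@localhost ~]# swapon --show    # no output means swap is off
+```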
+
+#### 2. Ensure the Yum Repository Is Available
+
+```shell
+[root@localhost ~]# yum clean all
+[root@localhost ~]# yum makecache fast
+Note:
+    Use a domestic (mainland China) yum mirror.
+    Run on all nodes.
+```
+
+#### 3. Set Hostnames
+
+```shell
+[root@localhost ~]# hostnamectl set-hostname master
+[root@localhost ~]# hostnamectl set-hostname node-1
+[root@localhost ~]# hostnamectl set-hostname node-2
+[root@localhost ~]# hostnamectl set-hostname node-3
+Note:
+    Run the matching command on each node.
+```
+
+#### 4. Add Local Name Resolution
+
+```shell
+[root@master ~]# cat >> /etc/hosts <<EOF
+10.0.0.220 master
+10.0.0.221 node-1
+10.0.0.222 node-2
+10.0.0.223 node-3
+EOF
+```
+
+(The remaining steps of this file — adding the Kubernetes yum repo, configuring /etc/sysconfig/kubelet and /etc/sysctl.conf, and running kubeadm init/join — were truncated in this patch hunk. The surviving final verification output follows.)
+
+```shell
+[root@master ~]# kubectl get node
+NAME     STATUS   ROLES    AGE     VERSION
+node-1   Ready    <none>   4m45s   v1.23.1
+node-2   Ready    <none>   4m40s   v1.23.1
+node-3   Ready    <none>   4m46s   v1.23.1
+```
+
+
+
+
+
+
+
+
+
diff --git a/kubernetes-MD/基于kubeadm部署kubernetes集群1-25版本.md b/kubernetes-MD/基于kubeadm部署kubernetes集群1-25版本.md
new file mode 100644
index 0000000..3a08218
--- /dev/null
+++ b/kubernetes-MD/基于kubeadm部署kubernetes集群1-25版本.md
@@ -0,0 +1,953 @@
+Deploying a Kubernetes Cluster with kubeadm (v1.25)
+
+Author: Xingdian <unauthorized reproduction will be prosecuted>
+
+------
+
+## I: Environment Preparation
+
+Four servers: one master and three nodes; the master must have at least 2 CPU cores.
+
+|  Node  | IP Address |
+| :------: | :--------: |
+| master | 10.0.0.220 |
+| node-1 | 10.0.0.221 |
+| node-2 | 10.0.0.222 |
+| node-3 | 10.0.0.223 |
+
+#### 1. Disable the Firewall and SELinux on All Servers
+
+```shell
+[root@localhost ~]# systemctl stop firewalld
+[root@localhost ~]# systemctl disable firewalld
+[root@localhost ~]# setenforce 0
+[root@localhost ~]# sed -i '/^SELINUX=/c SELINUX=disabled' /etc/selinux/config
+[root@localhost ~]# swapoff -a                            # temporary
+[root@localhost ~]# sed -i 's/.*swap.*/#&/' /etc/fstab    # permanent
+Note:
+    Disable the swap partition on every server.
+    Run these commands on all nodes.
+```
+
+#### 2. Ensure the Yum Repository Is Available
+
+```shell
+[root@localhost ~]# yum clean all
+[root@localhost ~]# yum makecache fast
+Note:
+    Use a domestic (mainland China) yum mirror.
+    Run on all nodes.
+```
+
+#### 3. Set Hostnames
+
+```shell
+[root@localhost ~]# hostnamectl set-hostname master
+[root@localhost ~]# hostnamectl set-hostname node-1
+[root@localhost ~]# hostnamectl set-hostname node-2
+[root@localhost ~]# hostnamectl set-hostname node-3
+Note:
+    Run the matching command on each node.
+```
+```
+
+#### 4. Add Local Name Resolution
+
+```shell
+[root@master ~]# cat >> /etc/hosts <<EOF
+10.0.0.220 master
+10.0.0.221 node-1
+10.0.0.222 node-2
+10.0.0.223 node-3
+EOF
+```
+
+(The remaining steps of this file — adding the Kubernetes yum repo, configuring /etc/sysctl.conf, installing the runtime and kubeadm, and running kubeadm init/join — were truncated in this patch hunk. The surviving final verification output follows.)
+
+```shell
+[root@master ~]# kubectl get node
+NAME     STATUS   ROLES    AGE    VERSION
+node-1   Ready    <none>   2m7s   v1.25.0
+node-2   Ready    <none>   50s    v1.25.0
+node-3   Ready    <none>   110s   v1.25.0
+```
+
+## III: Deploying the Dashboard
+
+#### 1. Enable IPVS in kube-proxy
+
+```shell
+[root@master ~]# kubectl get configmap kube-proxy -n kube-system -o yaml > kube-proxy-configmap.yaml
+[root@master ~]# sed -i 's/mode: ""/mode: "ipvs"/' kube-proxy-configmap.yaml
+[root@master ~]# kubectl apply -f kube-proxy-configmap.yaml
+[root@master ~]# rm -f kube-proxy-configmap.yaml
+[root@master ~]# kubectl get pod -n kube-system | grep kube-proxy | awk '{system("kubectl delete pod "$1" -n kube-system")}'
+```
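+
+To confirm the proxier switched to IPVS once the pods restart (a sketch; kubeadm labels the kube-proxy pods with k8s-app=kube-proxy):
+
+```shell
+[root@master ~]# kubectl logs -n kube-system -l k8s-app=kube-proxy | grep -i ipvs
+```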
+
+#### 2. Dashboard Installation Manifest
+
+```shell
+[root@master ~]# cat dashboard.yaml
+# Copyright 2017 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: kubernetes-dashboard
+
+---
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  labels:
+    k8s-app: kubernetes-dashboard
+  name: kubernetes-dashboard
+  namespace: kubernetes-dashboard
+
+---
+
+kind: Service
+apiVersion: v1
+metadata:
+  labels:
+    k8s-app: kubernetes-dashboard
+  name: kubernetes-dashboard
+  namespace: kubernetes-dashboard
+spec:
+  type: NodePort
+  ports:
+    - port: 443
+      targetPort: 8443
+  selector:
+    k8s-app: kubernetes-dashboard
+
+---
+
+apiVersion: v1
+kind: Secret
+metadata:
+  labels:
+    k8s-app: kubernetes-dashboard
+  name: kubernetes-dashboard-certs
+  namespace: kubernetes-dashboard
+type: Opaque
+
+---
+
+apiVersion: v1
+kind: Secret
+metadata:
+  labels:
+    k8s-app: kubernetes-dashboard
+  name: kubernetes-dashboard-csrf
+  namespace: kubernetes-dashboard
+type: Opaque
+data:
+  csrf: ""
+
+---
+
+apiVersion: v1
+kind: Secret
+metadata:
+  labels:
+    k8s-app: kubernetes-dashboard
+  name: kubernetes-dashboard-key-holder
+  namespace: kubernetes-dashboard
+type: Opaque
+
+---
+
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  labels:
+    k8s-app: kubernetes-dashboard
+  name: kubernetes-dashboard-settings
+  namespace: kubernetes-dashboard
+
+---
+
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  labels:
+    k8s-app: kubernetes-dashboard
+  name: kubernetes-dashboard
+  namespace: kubernetes-dashboard
+rules:
+  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
+  - apiGroups: [""]
+    resources: ["secrets"]
+    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
+    verbs: ["get", "update", "delete"]
+  # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    resourceNames: ["kubernetes-dashboard-settings"]
+    verbs: ["get", "update"]
+  # Allow Dashboard to get metrics.
+  - apiGroups: [""]
+    resources: ["services"]
+    resourceNames: ["heapster", "dashboard-metrics-scraper"]
+    verbs: ["proxy"]
+  - apiGroups: [""]
+    resources: ["services/proxy"]
+    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
+    verbs: ["get"]
+
+---
+
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  labels:
+    k8s-app: kubernetes-dashboard
+  name: kubernetes-dashboard
+rules:
+  # Allow Metrics Scraper to get metrics from the Metrics server
+  - apiGroups: ["metrics.k8s.io"]
+    resources: ["pods", "nodes"]
+    verbs: ["get", "list", "watch"]
+
+---
+
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  labels:
+    k8s-app: kubernetes-dashboard
+  name: kubernetes-dashboard
+  namespace: kubernetes-dashboard
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: kubernetes-dashboard
+subjects:
+  - kind: ServiceAccount
+    name: kubernetes-dashboard
+    namespace: kubernetes-dashboard
+
+---
+
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: kubernetes-dashboard
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: kubernetes-dashboard
+subjects:
+  - kind: ServiceAccount
+    name: kubernetes-dashboard
+    namespace: kubernetes-dashboard
+
+---
+
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+  labels:
+    k8s-app: kubernetes-dashboard
+  name: kubernetes-dashboard
+  namespace: kubernetes-dashboard
+spec:
+  replicas: 1
+  revisionHistoryLimit: 10
+  selector:
+    matchLabels:
+      k8s-app: kubernetes-dashboard
+  template:
+    metadata:
+      labels:
+        k8s-app: kubernetes-dashboard
+    spec:
+      securityContext:
+        seccompProfile:
+          type: RuntimeDefault
+      containers:
+        - name: kubernetes-dashboard
+          image: kubernetesui/dashboard:v2.6.1
+          imagePullPolicy: Always
+          ports:
+            - containerPort: 8443
+              protocol: TCP
+          args:
+            - --auto-generate-certificates
+            - --namespace=kubernetes-dashboard
+            # Uncomment the following line to manually specify Kubernetes API server Host
+            # If not specified, Dashboard will attempt to auto discover the API server and connect
+            # to it. Uncomment only if the default does not work.
+            # - --apiserver-host=http://my-address:port
+          volumeMounts:
+            - name: kubernetes-dashboard-certs
+              mountPath: /certs
+            # Create on-disk volume to store exec logs
+            - mountPath: /tmp
+              name: tmp-volume
+          livenessProbe:
+            httpGet:
+              scheme: HTTPS
+              path: /
+              port: 8443
+            initialDelaySeconds: 30
+            timeoutSeconds: 30
+          securityContext:
+            allowPrivilegeEscalation: false
+            readOnlyRootFilesystem: true
+            runAsUser: 1001
+            runAsGroup: 2001
+      volumes:
+        - name: kubernetes-dashboard-certs
+          secret:
+            secretName: kubernetes-dashboard-certs
+        - name: tmp-volume
+          emptyDir: {}
+      serviceAccountName: kubernetes-dashboard
+      nodeSelector:
+        "kubernetes.io/os": linux
+      # Comment the following tolerations if Dashboard must not be deployed on master
+      tolerations:
+        - key: node-role.kubernetes.io/master
+          effect: NoSchedule
+
+---
+
+kind: Service
+apiVersion: v1
+metadata:
+  labels:
+    k8s-app: dashboard-metrics-scraper
+  name: dashboard-metrics-scraper
+  namespace: kubernetes-dashboard
+spec:
+  ports:
+    - port: 8000
+      targetPort: 8000
+  selector:
+    k8s-app: dashboard-metrics-scraper
+
+---
+
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+  labels:
+    k8s-app: dashboard-metrics-scraper
+  name: dashboard-metrics-scraper
+  namespace: kubernetes-dashboard
+spec:
+  replicas: 1
+  revisionHistoryLimit: 10
+  selector:
+    matchLabels:
+      k8s-app: dashboard-metrics-scraper
+  template:
+    metadata:
+      labels:
+        k8s-app: dashboard-metrics-scraper
+    spec:
+      securityContext:
+        seccompProfile:
+          type: RuntimeDefault
+      containers:
+        - name: dashboard-metrics-scraper
+          image: kubernetesui/metrics-scraper:v1.0.8
+          ports:
+            - containerPort: 8000
+              protocol: TCP
+          livenessProbe:
+            httpGet:
+              scheme: HTTP
+              path: /
+              port: 8000
+            initialDelaySeconds: 30
+            timeoutSeconds: 30
+          volumeMounts:
+            - mountPath: /tmp
+              name: tmp-volume
+          securityContext:
+            allowPrivilegeEscalation: false
+            readOnlyRootFilesystem: true
+            runAsUser: 1001
+            runAsGroup: 2001
+      serviceAccountName: kubernetes-dashboard
+      nodeSelector:
+        "kubernetes.io/os": linux
+      # Comment the following tolerations if Dashboard must not be deployed on master
+      tolerations:
+        - key: node-role.kubernetes.io/master
+          effect: NoSchedule
+      volumes:
+        - name: tmp-volume
+          emptyDir: {}
+```
+
+#### 3. Create the Certificates
+
+```shell
+[root@k8s-master ~]# mkdir dashboard-certs
+[root@k8s-master ~]# cd dashboard-certs/
+# create the namespace
+[root@k8s-master ~]# kubectl create namespace kubernetes-dashboard
+# generate the private key
+[root@k8s-master ~]# openssl genrsa -out dashboard.key 2048
+# certificate signing request
+[root@k8s-master ~]# openssl req -days 36000 -new -out dashboard.csr -key dashboard.key -subj '/CN=dashboard-cert'
+# self-sign the certificate
+[root@k8s-master ~]# openssl x509 -req -in dashboard.csr -signkey dashboard.key -out dashboard.crt
+# create the kubernetes-dashboard-certs secret
+[root@k8s-master ~]# kubectl create secret generic kubernetes-dashboard-certs --from-file=dashboard.key --from-file=dashboard.crt -n kubernetes-dashboard
+```
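+
+Optionally inspect the self-signed certificate before using it:
+
+```shell
+[root@k8s-master ~]# openssl x509 -in dashboard.crt -noout -subject -dates
+```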
+
+#### 4. Create an Admin User
+
+```shell
+# create the account
+[root@k8s-master ~]# vim dashboard-admin.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  labels:
+    k8s-app: kubernetes-dashboard
+  name: dashboard-admin
+  namespace: kubernetes-dashboard
+# run after saving and exiting
+[root@k8s-master ~]# kubectl create -f dashboard-admin.yaml
+# grant permissions to the user
+[root@k8s-master ~]# vim dashboard-admin-bind-cluster-role.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: dashboard-admin-bind-cluster-role
+  labels:
+    k8s-app: kubernetes-dashboard
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: cluster-admin
+subjects:
+- kind: ServiceAccount
+  name: dashboard-admin
+  namespace: kubernetes-dashboard
+# run after saving and exiting
+[root@k8s-master ~]# kubectl create -f dashboard-admin-bind-cluster-role.yaml
+```
+
+#### 5. Install the Dashboard
+
+```shell
+[root@master dashboard-certs]# kubectl create -f dashboard.yaml
+```
+
+#### 6. Obtain a Token
+
+```shell
+[root@master dashboard-certs]# kubectl -n kubernetes-dashboard create token dashboard-admin
+eyJhbGciOiJSUzI1NiIsImtpZCI6InlBck13aTFMR2daR3htTmxSdG5XbGJjOVFLWmdMZlgzRU10TmJWRFNEMk0ifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiXSwiZXhwIjoxNjYyMjE0MTA3LCJpYXQiOjE2NjIyMTA1MDcsImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJkYXNoYm9hcmQtYWRtaW4iLCJ1aWQiOiIwOTRhYWI2NC05NTkyLTRjYTctOWI3MS0yNDEwMmI5ODA1YjcifX0sIm5iZiI6MTY2MjIxMDUwNywic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmVybmV0ZXMtZGFzaGJvYXJkOmRhc2hib2FyZC1hZG1pbiJ9.MQfc83l08PsAqmBHRzwgE_TsZGct0Sul1lM7Ks0ssf29DtXt22u9hHisvaLmQ64sNsvb_D7r47kDDTxZPbIJP_A2mBuHFqy_dnryUmqlTj7KFJm4PdObwiMTlnBch-v7HqxJKLuA6XXLxtpNrbLWqqG47Bc2kvvcF4BzSkiDhe-s5L0PS-WY753QjV0C9v63G8KJDxkQEGVC4PjqfXSclLi_jvIe4n3UqhUNHPl85JWgBhJHTTAei3Ztp7IMweztR_P30p6BiXEF0Kmcv8Nb7Xsk2dx5avYyiTRZTpq4pBkvAMKlCbXyKufh78mil_oNdaA8Q_AeFWFwgDx9UrGoFA
+```
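+
+With the token in hand, find the NodePort assigned to the dashboard Service and log in from a browser (paste the token on the login screen):
+
+```shell
+[root@master ~]# kubectl get svc -n kubernetes-dashboard kubernetes-dashboard
+# then open https://<any-node-ip>:<nodeport>/
+```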
+
+#### 7. Create a kubeconfig
+
+```shell
+# get the certificate-authority-data:
+[root@master ~]# CURRENT_CONTEXT=$(kubectl config current-context)
+[root@master ~]# CURRENT_CLUSTER=$(kubectl config view --raw -o=go-template='{{range .contexts}}{{if eq .name "'''${CURRENT_CONTEXT}'''"}}{{ index .context "cluster" }}{{end}}{{end}}')
+[root@master ~]# kubectl config view --raw -o=go-template='{{range .clusters}}{{if eq .name "'''${CURRENT_CLUSTER}'''"}}"{{with index .cluster "certificate-authority-data" }}{{.}}{{end}}"{{ end }}{{ end }}'
+"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMvakNDQWVhZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJeU1Ea3dNekEzTVRBeE0xb1hEVE15TURnek1UQTNNVEF4TTFvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTVNZCnRxdjVwdlVaQVN3VHl0ODkrWGphM1ZuZFZnaWVCbFc1bEZVc2dzSklxa0tSNlV2cjVYcXEvWjNOaUVpUlBqT28KWkh4a1V5SWpqdUFTUXZuYzhrTXhvNjNQY3d2UUNEYzd3V1pQeVBxMDVobFZhUlhYK0hHNjlaRXozYkQrUmlObgpyTU5uSVZqeEI0ck56SWs0cGFUNjBZMU5hdWx0V01NbEFyMFM3ZC9YQ3ZMeVhBK0NCNVFmZ2xSQTFJZnJ3ZjNJCno3YS9iQ0M4Qk9Fak94QmllRCtra3JYWGJtdXlMUHpTZkdKUGNUajI1eGdjK2RvNDJZKzZ4UUVCK0ZTSnN6VWIKVzhyMkx5TkI2YjNaZlBZcjNIMXQ4RkxkeUxtTU9nR1M2RkpPMmpQWVVWR0RObURLUHlPZWJMVit4UXlvMW4rMQpYK0F5NzJ1b2JlbklESE54czhzQ0F3RUFBYU5aTUZjd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZMVXJSUnZXN2VrRlJhajRFRDFuWmVONXJzNVRNQlVHQTFVZEVRUU8KTUF5Q0NtdDFZbVZ5Ym1WMFpYTXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBQ2lucC9OakRCOGhLbFhuMzJCTQorV0hLcTRTOHNCTFBFeFhJSzloUXdWNmVodWgyOEQzSEltOFlFUCtJazEybDUwNi90QlNpYllOYjV1dHYyVnBmCmltUEh2aFAvUzZmTFE4MXVIL2JaQytaMlJ3b3VvLzU4TkJoZVRhR2ozK2VXTzNnMDllaEhaaHVFajE4WWVtaDYKU0xhUU9SZWE2dEpHVjNlVURWUk44Tnc1aXp2T3AxZ2poVHdsNzJTL3JycmxoREo2dGM4VDlPaUFhOWNkeVlPWApLNVZNdlEwRWw3aDlLT3lmN3FsSWgxL0dhSWdqVUl4Z3FNQ1lIallKc0Jvd2g2eDB5d0tSYllJQkl4d1M0NDNECnRmYU5wOTBpUFVGS0Q3c2IvTWxGeDZpK3l3UjVnQUd3NWJWSEdIZTMrY0szNzlRd1R2NS8zdWlYQTlBUnhiVloKS0hNPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg=="
+
+[root@master ~]# cat config
+apiVersion: v1
+clusters:
+- cluster:
+    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJeE1EVXhNakV3TXpReU5Gb1hEVE14TURVeE1ERXdNelF5TkZvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTFd5CmFZLy85Nlc5R2hxWWVBVER5cXhQV2tmVE1mSUxNMFkydy9RSm9SRDJWZmw1cjFNR25mRWNteG81bXc4Z1NXMmEKdVNmeml6dU41eDRMblBBYlVvbHdubkxMeWx3cENmTkRKMFRUVFFyOThuTmphWWQ0d2RmK09uZmtZQ1VaVG1NTwpYWDZBMEZJblFHTEpWQUdOb0xUUnVIR3F6dU0yNUd1Rkp5aXBoNlhRN0tHcmJFVFo0RXQyVWg3azV2UGxPVDQ3CkNEQjlDVkMra1c5MkdIRmNrTkViUU9kTUpPTkZXalh1K1lsSjlZdzNybzhJYU05QVR5SDFwNmNzaXNoejhybVQKUGZEUkl4cXpsNTVzenJNUHV5Y0JqZkVEZXFhcjQ2OXJyMFFGcWJ5NTdaaEtBcGMybTA0eTZ6ZllUQlB4cDhndwpXZ01ONktkcjU4bEFpOERwN3kwQ0F3RUFBYU5DTUVBd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZERTBCTVRreE5BYlpmMGZ6S2JwRm5ZUU94aXpNQTBHQ1NxR1NJYjMKRFFFQkN3VUFBNElCQVFCdE1yU281K2JlcEF2UGpwOUJ4TDlncnFtTnpOVEczRWFHZnZKUndoUE5qVXZ1bDZFZQpNenlIM3o3SERzYW5kSHJMU2xYMGZXZ2Y3TFRBSVRLV3duTDI3NjVzaTJ4L0prb2pCV0VySytGTEs1dGZXbmdmCnZ4cE13eE83RVNYd0FwVDdKWk9iVVA0eStPc3k2VzhQQXRjeHFGUHdyaVNhM29KYnZwZEFvcHloZXdoNUxNcUwKWkpobU4wK1RhTnlOaVJXaEkwcnZOSGYrNEdtcjhMd2lzMXZPdGZ4b2FtcGhPWE8wS0NheEJ0MWoxcDQxK2pNTwo0UUhlRWtFTkJndERzMUtuMTRhbFR6NWI1cFg0amwrU2tOOFk0ZlppUk9SRXJ4cWVmVkJEZ1Z0aWtQbEp5VFkyCmRQMkJOSE82R1FjalVvZmZBTmwwK1ZkblJTWGEvNVRsZU1PUwotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
+    server: https://10.0.2.150:6443
+  name: kubernetes
+contexts:
+- context:
+    cluster: kubernetes
+    user: dashboard-admin
+  name: dashboard-admin@kubernetes
+current-context: dashboard-admin@kubernetes
+kind: Config
+preferences: {}
+users:
+- name: dashboard-admin
+  user:
+    token: eyJhbGciOiJSUzI1NiIsImtpZCI6InlBck13aTFMR2daR3htTmxSdG5XbGJjOVFLWmdMZlgzRU10TmJWRFNEMk0ifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiXSwiZXhwIjoxNjYyMjE0MjcwLCJpYXQiOjE2NjIyMTA2NzAsImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJkYXNoYm9hcmQtYWRtaW4iLCJ1aWQiOiIwOTRhYWI2NC05NTkyLTRjYTctOWI3MS0yNDEwMmI5ODA1YjcifX0sIm5iZiI6MTY2MjIxMDY3MCwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmVybmV0ZXMtZGFzaGJvYXJkOmRhc2hib2FyZC1hZG1pbiJ9.bHK9jgpCAQIAxwurh05zzndo22hWEzoDnfFRS3VDWAfoD0YOsTF6RbHFSshn0Vm-Xv1sEIgmVkjgftP2Pq_saMs-WdgHfTLz2CjxWpkYV4WQcMs4WJq9Lx5SQeNxw9mEh8c085nnx368GWkENHSsldKP-O6YliWQAP8qpOiUWrJqhteVQi0GD7EYmOPlnKFZF2YKaROYFvn9P8JiCL8rRTZ5GUYIty9LRLkh3daFXj67krk4v3pNLqdHcKKwkv8vFN4hl6RbgA3nY
+```
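+
+The resulting file can be used directly with kubectl, or chosen via the Kubeconfig option on the Dashboard login page (a sketch):
+
+```shell
+[root@master ~]# kubectl --kubeconfig=./config get nodes
+```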
+
diff --git a/kubernetes-MD/基于kubernetes部署Prometheus和Grafana.md b/kubernetes-MD/基于kubernetes部署Prometheus和Grafana.md
new file mode 100644
index 0000000..9321ba6
--- /dev/null
+++ b/kubernetes-MD/基于kubernetes部署Prometheus和Grafana.md
@@ -0,0 +1,684 @@
+Deploying Prometheus and Grafana on Kubernetes
+
+Author: Xingdian <unauthorized reproduction will be prosecuted>
+
+------
+
+## I: Environment Preparation
+
+#### 1. Kubernetes Cluster Healthy
+
+```shell
+[root@master ~]# kubectl get node
+NAME     STATUS   ROLES                  AGE   VERSION
+master   Ready    control-plane,master   36d   v1.23.1
+node-1   Ready    <none>                 36d   v1.23.1
+node-2   Ready    <none>                 36d   v1.23.1
+node-3   Ready    <none>                 36d   v1.23.1
+```
+
+#### 2. Harbor Registry Healthy
+
+![image-20220602010601512](%E5%9F%BA%E4%BA%8Ekubernetes%E9%83%A8%E7%BD%B2Prometheus%E5%92%8CGrafana.assets/image-20220602010601512.png)
+
+## II: Prometheus Deployment
+
+#### 1. node-exporter Deployment
+
+    node-exporter collects monitoring metrics from machines (physical, virtual, or cloud hosts), including CPU, memory, disk, network, and file-descriptor counts.
+
+Create the monitoring namespace:
+
+```shell
+[root@master ~]# kubectl create ns monitor-sa
+```
+
+Create node-export.yaml:
+
+```shell
+[root@master ~]# vim node-export.yaml
+apiVersion: apps/v1
+kind: DaemonSet    # guarantees that every node in the cluster runs an identical pod
+metadata:
+  name: node-exporter
+  namespace: monitor-sa
+  labels:
+    name: node-exporter
+spec:
+  selector:
+    matchLabels:
+      name: node-exporter
+  template:
+    metadata:
+      labels:
+        name: node-exporter
+    spec:
+      hostPID: true
+      hostIPC: true
+      hostNetwork: true
+      containers:
+      - name: node-exporter
+        image: prom/node-exporter:v0.16.0
+        #image: 10.0.0.230/xingdian/node-exporter:v0.16.0
+        ports:
+        - containerPort: 9100
+        resources:
+          requests:
+            cpu: 0.15    # this container needs at least 0.15 CPU cores
+        securityContext:
+          privileged: true    # privileged mode
+        args:
+        - --path.procfs
+        - /host/proc
+        - --path.sysfs
+        - /host/sys
+        - --collector.filesystem.ignored-mount-points
+        - '^/(sys|proc|dev|host|etc)($|/)'
+        volumeMounts:
+        - name: dev
+          mountPath: /host/dev
+        - name: proc
+          mountPath: /host/proc
+        - name: sys
+          mountPath: /host/sys
+        - name: rootfs
+          mountPath: /rootfs
+      tolerations:
+      - key: "node-role.kubernetes.io/master"
+        operator: "Exists"
+        effect: "NoSchedule"
+      volumes:
+      - name: proc
+        hostPath:
+          path: /proc
+      - name: dev
+        hostPath:
+          path: /dev
+      - name: sys
+        hostPath:
+          path: /sys
+      - name: rootfs
+        hostPath:
+          path: /
+```
+
+Note:
+
+    With hostNetwork, hostIPC, and hostPID all true, the containers in this pod share the host's network, communicate with the host via IPC, and can see all processes running on it. hostNetwork: true exposes port 9100 directly on every host, so no Service is needed to reach the exporter.
+
+Create:
+
+```shell
+[root@master ~]# kubectl apply -f node-export.yaml
+```
+
+Check that node-exporter deployed successfully:
+
+```shell
+[root@master ~]# kubectl get pods -n monitor-sa -o wide
+NAME                  READY   STATUS    RESTARTS   AGE   IP           NODE     NOMINATED NODE   READINESS GATES
+node-exporter-2cbrg   1/1     Running   0          34m   10.0.0.220   master   <none>           <none>
+node-exporter-7rrbh   1/1     Running   0          34m   10.0.0.222   node-2   <none>           <none>
+node-exporter-96v29   1/1     Running   0          34m   10.0.0.221   node-1   <none>           <none>
+node-exporter-bf2j8   1/1     Running   0          34m   10.0.0.223   node-3   <none>           <none>
+```
+
+Note:
+
+    node-exporter listens on port 9100 by default; all monitoring data collected on the host can be fetched from it:
+
+```shell
+[root@master ~]# curl http://10.0.0.220:9100/metrics | grep node_cpu_seconds
+ % Total % Received % Xferd Average Speed Time Time Time Current
+ Dload Upload Total Spent Left Speed
+ 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0# HELP node_cpu_seconds_total Seconds the cpus spent in each mode.
+# TYPE node_cpu_seconds_total counter
+node_cpu_seconds_total{cpu="0",mode="idle"} 8398.49
+node_cpu_seconds_total{cpu="0",mode="iowait"} 1.54
+node_cpu_seconds_total{cpu="0",mode="irq"} 0
+node_cpu_seconds_total{cpu="0",mode="nice"} 0
+node_cpu_seconds_total{cpu="0",mode="softirq"} 17.2
+node_cpu_seconds_total{cpu="0",mode="steal"} 0
+node_cpu_seconds_total{cpu="0",mode="system"} 70.61
+node_cpu_seconds_total{cpu="0",mode="user"} 187.04
+node_cpu_seconds_total{cpu="1",mode="idle"} 8403.82
+node_cpu_seconds_total{cpu="1",mode="iowait"} 4.95
+node_cpu_seconds_total{cpu="1",mode="irq"} 0
+node_cpu_seconds_total{cpu="1",mode="nice"} 0
+node_cpu_seconds_total{cpu="1",mode="softirq"} 16.75
+node_cpu_seconds_total{cpu="1",mode="steal"} 0
+node_cpu_seconds_total{cpu="1",mode="system"} 71.26
+node_cpu_seconds_total{cpu="1",mode="user"} 190.27
+100 74016 100 74016 0 0 5878k 0 --:--:-- --:--:-- --:--:-- 6023k
+
+[root@master ~]# curl http://10.0.0.220:9100/metrics | grep node_load
+ % Total % Received % Xferd Average Speed Time Time Time Current
+ Dload Upload Total Spent Left Speed
+  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
+# HELP node_load1 1m load average.
+# TYPE node_load1 gauge
+node_load1 0.2
+# HELP node_load15 15m load average.
+# TYPE node_load15 gauge
+node_load15 0.22
+# HELP node_load5 5m load average.
+# TYPE node_load5 gauge
+node_load5 0.2
+100 74044 100 74044 0 0 8604k 0 --:--:-- --:--:-- --:--:-- 9038k
+```
+
+#### 2. Prometheus Installation
+
+Create a service account and grant it RBAC permissions:
+
+```shell
+# create a service account named monitor
+[root@master ~]# kubectl create serviceaccount monitor -n monitor-sa
+
+# bind the monitor service account to the cluster-admin clusterrole
+[root@master ~]# kubectl create clusterrolebinding monitor-clusterrolebinding -n monitor-sa --clusterrole=cluster-admin --serviceaccount=monitor-sa:monitor
+```
+
+Create the Prometheus data directory:
+
+```shell
+# Prometheus will be scheduled to node-1
+[root@node-1 ~]# mkdir /data && chmod 777 /data
+```
+
+Create a ConfigMap volume to hold the Prometheus configuration:
+
+```shell
+[root@master ~]# vim prometheus-cfg.yaml
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  labels:
+    app: prometheus
+  name: prometheus-config
+  namespace: monitor-sa
+data:
+  prometheus.yml: |
+    global:
+      scrape_interval: 15s
+      scrape_timeout: 10s
+      evaluation_interval: 1m
+    scrape_configs:
+    - job_name: 'kubernetes-node'
+      kubernetes_sd_configs:
+      - role: node
+      relabel_configs:
+      - source_labels: [__address__]
+        regex: '(.*):10250'
+        replacement: '${1}:9100'
+        target_label: __address__
+        action: replace
+      - action: labelmap
+        regex: __meta_kubernetes_node_label_(.+)
+    - job_name: 'kubernetes-node-cadvisor'
+      kubernetes_sd_configs:
+      - role: node
+      scheme: https
+      tls_config:
+        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+      relabel_configs:
+      - action: labelmap
+        regex: __meta_kubernetes_node_label_(.+)
+      - target_label: __address__
+        replacement: kubernetes.default.svc:443
+      - source_labels: [__meta_kubernetes_node_name]
+        regex: (.+)
+        target_label: __metrics_path__
+        replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor
+    - job_name: 'kubernetes-apiserver'
+      kubernetes_sd_configs:
+      - role: endpoints
+      scheme: https
+      tls_config:
+        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+      relabel_configs:
+      - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
+        action: keep
+        regex: default;kubernetes;https
+    - job_name: 'kubernetes-service-endpoints'
+      kubernetes_sd_configs:
+      - role: endpoints
+      relabel_configs:
+      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
+        action: keep
+        regex: true
+      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
+        action: replace
+        target_label: __scheme__
+        regex: (https?)
+      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
+        action: replace
+        target_label: __metrics_path__
+        regex: (.+)
+      - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
+        action: replace
+        target_label: __address__
+        regex: ([^:]+)(?::\d+)?;(\d+)
+        replacement: $1:$2
+      - action: labelmap
+        regex: __meta_kubernetes_service_label_(.+)
+      - source_labels: [__meta_kubernetes_namespace]
+        action: replace
+        target_label: kubernetes_namespace
+      - source_labels: [__meta_kubernetes_service_name]
+        action: replace
+        target_label: kubernetes_name
+```
+
+Create:
+
+```shell
+[root@master ~]# kubectl apply -f prometheus-cfg.yaml
+configmap/prometheus-config created
+```
+
+Configuration explained:
+
+```shell
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  labels:
+    app: prometheus
+  name: prometheus-config
+  namespace: monitor-sa
+data:
+  prometheus.yml: |
+    global:
+      scrape_interval: 15s     # interval for scraping metrics from targets
+      scrape_timeout: 10s      # scrape timeout, default 10s
+      evaluation_interval: 1m  # how often alert rules are evaluated, default 1m
+    scrape_configs:            # data sources ("targets"); each is named by job_name and is statically configured or discovered
+    - job_name: 'kubernetes-node'
+      kubernetes_sd_configs:   # use Kubernetes service discovery
+      - role: node             # the node role discovers every cluster node via the default kubelet HTTP port
+      relabel_configs:         # relabeling
+      - source_labels: [__address__]   # original label: match the address
+        regex: '(.*):10250'            # match URLs with port 10250
+        replacement: '${1}:9100'       # keep the matched IP and swap the port
+        target_label: __address__      # the new address becomes <matched ip>:9100
+        action: replace                # replace action
+      - action: labelmap
+        regex: __meta_kubernetes_node_label_(.+)   # labels matching this regex are kept; without it only the instance label shows
+    - job_name: 'kubernetes-node-cadvisor'   # scrape cAdvisor data, i.e. container resource usage from the kubelet /metrics/cadvisor endpoint
+      kubernetes_sd_configs:
+      - role: node
+      scheme: https
+      tls_config:
+        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+      relabel_configs:
+      - action: labelmap                           # keep the matched labels
+        regex: __meta_kubernetes_node_label_(.+)   # keep labels with the __meta_kubernetes_node_label prefix
+      - target_label: __address__                  # the discovered address, e.g. 192.168.40.180:10250
+        replacement: kubernetes.default.svc:443    # replace it with kubernetes.default.svc:443
+      - source_labels: [__meta_kubernetes_node_name]
+        regex: (.+)                                # capture the node name from __meta_kubernetes_node_name
+        target_label: __metrics_path__             # rewrite the metrics path
+        replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor
+        # ${1} is the captured node name, so the final URL becomes
+        # https://kubernetes.default.svc:443/api/v1/nodes/<node-name>/proxy/metrics/cadvisor
+    - job_name: 'kubernetes-apiserver'
+      kubernetes_sd_configs:
+      - role: endpoints    # endpoint discovery; scrapes the apiserver on port 6443
+      scheme: https
+      tls_config:
+        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+      relabel_configs:
+      - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
+        # the endpoint's namespace, service name, and port name
+        action: keep    # keep only matching instances; drop the rest
+        regex: default;kubernetes;https    # keep https endpoints of the kubernetes service in the default namespace
+    - job_name: 'kubernetes-service-endpoints'
+      kubernetes_sd_configs:
+      - role: endpoints
+      relabel_configs:
+      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
+        action: keep
+        regex: true
+        # only scrape endpoints whose service declares the annotation prometheus.io/scrape: "true";
+        # annotations are key/value pairs, so the source label is the key and the regex matches the value:
+        # when it matches, the keep action retains the target, otherwise it is dropped
+      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
+        action: replace
+        target_label: __scheme__
+        regex: (https?)
+        # set the scheme from the prometheus.io/scheme annotation when it matches the regex
+      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
+        action: replace
+        target_label: __metrics_path__
+        regex: (.+)
+        # if an application exposes metrics on a path other than /metrics, declare e.g.
+        # prometheus.io/path = /mymetrics on its service; that value is assigned to __metrics_path__,
+        # letting Prometheus find the custom metrics path (the annotation name must match what the service uses)
+      - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
+        action: replace
+        target_label: __address__
+        regex: ([^:]+)(?::\d+)?;(\d+)
+        replacement: $1:$2
+        # join the host with the port declared in the service's prometheus.io/port annotation and assign
+        # it to __address__, so custom applications are scraped on the declared port together with __metrics_path__
+      - action: labelmap    # keep the matched labels
+        regex: __meta_kubernetes_service_label_(.+)
+      - source_labels: [__meta_kubernetes_namespace]
+        action: replace    # rename __meta_kubernetes_namespace to kubernetes_namespace
+        target_label: kubernetes_namespace
+      - source_labels: [__meta_kubernetes_service_name]
+        action: replace
+        target_label: kubernetes_name
+```
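+
+Before applying changes, the embedded prometheus.yml can be validated with promtool (a sketch; extract the key from the ConfigMap first):
+
+```shell
+[root@master ~]# kubectl get configmap prometheus-config -n monitor-sa -o jsonpath='{.data.prometheus\.yml}' > /tmp/prometheus.yml
+[root@master ~]# promtool check config /tmp/prometheus.yml
+```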
+
+Deploy Prometheus with a Deployment:
+
+```shell
+[root@master ~]# cat prometheus-deploy.yaml
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: prometheus-server
+  namespace: monitor-sa
+  labels:
+    app: prometheus
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: prometheus
+      component: server
+    #matchExpressions:
+    #- {key: app, operator: In, values: [prometheus]}
+    #- {key: component, operator: In, values: [server]}
+  template:
+    metadata:
+      labels:
+        app: prometheus
+        component: server
+      annotations:
+        prometheus.io/scrape: 'false'
+    spec:
+      nodeName: node-1    # pin the pod to this node
+      serviceAccountName: monitor
+      containers:
+      - name: prometheus
+        image: prom/prometheus:v2.2.1
+        #image: 10.0.0.230/xingdian/prometheus:v2.2.1
+        imagePullPolicy: IfNotPresent
+        command:
+        - prometheus
+        - --config.file=/etc/prometheus/prometheus.yml
+        - --storage.tsdb.path=/prometheus    # data directory
+        - --storage.tsdb.retention=720h      # data retention period
+        - --web.enable-lifecycle             # enable hot reloading
+        ports:
+        - containerPort: 9090
+          protocol: TCP
+        volumeMounts:
+        - mountPath: /etc/prometheus/prometheus.yml
+          name: prometheus-config
+          subPath: prometheus.yml
+        - mountPath: /prometheus/
+          name: prometheus-storage-volume
+      volumes:
+      - name: prometheus-config
+        configMap:
+          name: prometheus-config
+          items:
+          - key: prometheus.yml
+            path: prometheus.yml
+            mode: 0644
+      - name: prometheus-storage-volume
+        hostPath:
+          path: /data
+          type: Directory
+```
+
+Create:
+
+```shell
+[root@master ~]# kubectl apply -f prometheus-deploy.yaml
+deployment.apps/prometheus-server created
+```
+
+Check:
+
+```shell
+[root@master ~]# kubectl get pods -o wide -n monitor-sa
+NAME                                READY   STATUS    RESTARTS   AGE   IP             NODE     NOMINATED NODE   READINESS GATES
+prometheus-server-59cb5d648-bxwrb   1/1     Running   0          14m   10.244.2.100   node-1   <none>           <none>
+```
+
+#### 3. Create a Service for the Prometheus Pod
+
+```shell
+[root@master ~]# cat prometheus-svc.yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: prometheus
+  namespace: monitor-sa
+  labels:
+    app: prometheus
+spec:
+  type: NodePort
+  ports:
+  - port: 9090
+    targetPort: 9090
+    protocol: TCP
+  selector:
+    app: prometheus
+    component: server
+```
+
+Create:
+
+```shell
+[root@master ~]# kubectl apply -f prometheus-svc.yaml
+service/prometheus created
+```
+
+Check the NodePort the Service maps on the host:
+
+```shell
+[root@master ~]# kubectl get svc -n monitor-sa
+NAME         TYPE       CLUSTER-IP     EXTERNAL-IP   PORT(S)          AGE
+prometheus   NodePort   10.106.61.80   <none>        9090:32169/TCP   32m
+```
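+
+A quick reachability check against the NodePort before opening the web UI:
+
+```shell
+[root@master ~]# curl -I http://10.0.0.220:32169/graph
+```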
+
+#### 4. Web UI
+
+![image-20220602011956600](%E5%9F%BA%E4%BA%8Ekubernetes%E9%83%A8%E7%BD%B2Prometheus%E5%92%8CGrafana.assets/image-20220602011956600.png)
+
+![image-20220602012012382](%E5%9F%BA%E4%BA%8Ekubernetes%E9%83%A8%E7%BD%B2Prometheus%E5%92%8CGrafana.assets/image-20220602012012382.png)
+
+#### 5. Prometheus Hot Reload
+
+```shell
+# To hot-reload Prometheus after a config change (i.e. apply the config without stopping Prometheus), find the pod IP:
+[root@master ~]# kubectl get pods -n monitor-sa -o wide -l app=prometheus
+NAME                                 READY   STATUS    RESTARTS   AGE     IP             NODE        NOMINATED NODE   READINESS GATES
+prometheus-server-689fb8cdbc-kcsw2   1/1     Running   0          5m39s   10.244.36.70   k8s-node1   <none>           <none>
+
+# then trigger the reload:
+[root@master ~]# curl -X POST http://10.244.36.70:9090/-/reload
+
+# check the logs
+[root@master ~]# kubectl logs -n monitor-sa prometheus-server-689fb8cdbc-kcsw2
+```
+
+Note:
+
+```shell
+# Hot reloading can be slow; Prometheus can also be force-restarted. After editing prometheus-cfg.yaml, delete:
+[root@master ~]# kubectl delete -f prometheus-cfg.yaml
+[root@master ~]# kubectl delete -f prometheus-deploy.yaml
+# then apply the updated files:
+[root@master ~]# kubectl apply -f prometheus-cfg.yaml
+[root@master ~]# kubectl apply -f prometheus-deploy.yaml
+# Note: prefer hot reloading in production; force deletion may lose monitoring data.
+```
+
+## III: Grafana Deployment
+
+#### 1. About Grafana
+
+Grafana is a cross-platform, open-source metrics analysis and visualization tool. It visualizes collected data and notifies alert receivers in time.
+
+Its main features:
+
+1) Display: fast and flexible client-side graphs; panel plugins offer many ways to visualize metrics and logs, and the official library provides rich dashboard plugins such as heatmaps, line charts, and other chart types
+
+2) Data sources: Graphite, InfluxDB, OpenTSDB, Prometheus, Elasticsearch, CloudWatch, KairosDB, and more
+
+3) Notifications: define alert rules for your most important metrics visually; Grafana evaluates them continuously and sends notifications via Slack, PagerDuty, etc. when data crosses thresholds
+
+4) Mixed display: mix different data sources in the same chart, specifying the data source per query, even custom data sources
+
+5) Annotations: annotate charts with rich events from different data sources; hovering over an event shows its full metadata and tags
+
+#### 2. Grafana Installation
+
+```shell
+[root@master prome]# cat grafana.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: monitoring-grafana
+  namespace: kube-system
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      task: monitoring
+      k8s-app: grafana
+  template:
+    metadata:
+      labels:
+        task: monitoring
+        k8s-app: grafana
+    spec:
+      containers:
+      - name: grafana
+        image: 10.0.0.230/xingdian/heapster-grafana-amd64:v5.0.4
+        #heleicool/heapster-grafana-amd64:v5.0.4
+        ports:
+        - containerPort: 3000
+          protocol: TCP
+        volumeMounts:
+        - mountPath: /etc/ssl/certs
+          name: ca-certificates
+          readOnly: true
+        - mountPath: /var
+          name: grafana-storage
+        env:
+        - name: INFLUXDB_HOST
+          value: monitoring-influxdb
+        - name: GF_SERVER_HTTP_PORT
+          value: "3000"
+          # The following env variables are required to make Grafana accessible via
+          # the kubernetes api-server proxy. On production clusters, we recommend
+          # removing these env variables, setup auth for grafana, and expose the grafana
+          # service using a LoadBalancer or a public IP.
+        - name: GF_AUTH_BASIC_ENABLED
+          value: "false"
+        - name: GF_AUTH_ANONYMOUS_ENABLED
+          value: "true"
+        - name: GF_AUTH_ANONYMOUS_ORG_ROLE
+          value: Admin
+        - name: GF_SERVER_ROOT_URL
+          # If you're only using the API Server proxy, set this value instead:
+          # value: /api/v1/namespaces/kube-system/services/monitoring-grafana/proxy
+          value: /
+      volumes:
+      - name: ca-certificates
+        hostPath:
+          path: /etc/ssl/certs
+      - name: grafana-storage
+        emptyDir: {}
+---
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons)
+    # If you are NOT using this as an addon, you should comment out this line.
+    kubernetes.io/cluster-service: 'true'
+    kubernetes.io/name: monitoring-grafana
+  name: monitoring-grafana
+  namespace: kube-system
+spec:
+  # In a production setup, we recommend accessing Grafana through an external Loadbalancer
+  # or through a public IP.
+  # type: LoadBalancer
+  # You could also use NodePort to expose the service at a randomly-generated port
+  # type: NodePort
+  ports:
+  - port: 80
+    targetPort: 3000
+  selector:
+    k8s-app: grafana
+  type: NodePort
+```
+
+Create:
+
+```shell
+[root@master prome]# kubectl apply -f grafana.yaml
+deployment.apps/monitoring-grafana created
+service/monitoring-grafana created
+```
+
+Check:
+
+```shell
+[root@master prome]# kubectl get pods -n kube-system -l task=monitoring -o wide
+NAME                                  READY   STATUS    RESTARTS   AGE   IP            NODE     NOMINATED NODE   READINESS GATES
+monitoring-grafana-7c5c6c7486-rbt62   1/1     Running   0          9s    10.244.1.83   node-3   <none>           <none>
+```
+
+```shell
+[root@master prome]# kubectl get svc -n kube-system | grep grafana
+monitoring-grafana   NodePort   10.101.77.194   <none>        80:30919/TCP   76s
+```
+
+## IV: Configuring Grafana
+
+Browser access:
+
+![image-20220602013222284](%E5%9F%BA%E4%BA%8Ekubernetes%E9%83%A8%E7%BD%B2Prometheus%E5%92%8CGrafana.assets/image-20220602013222284.png)
+
+Add a data source:
+
+![image-20220602013322234](%E5%9F%BA%E4%BA%8Ekubernetes%E9%83%A8%E7%BD%B2Prometheus%E5%92%8CGrafana.assets/image-20220602013322234.png)
+
+Specify the Prometheus address:
+
+![image-20220602013441712](%E5%9F%BA%E4%BA%8Ekubernetes%E9%83%A8%E7%BD%B2Prometheus%E5%92%8CGrafana.assets/image-20220602013441712.png)
+
+Import a monitoring dashboard template:
+
+![image-20220602013943317](%E5%9F%BA%E4%BA%8Ekubernetes%E9%83%A8%E7%BD%B2Prometheus%E5%92%8CGrafana.assets/image-20220602013943317.png)
+
+![image-20220602014027197](%E5%9F%BA%E4%BA%8Ekubernetes%E9%83%A8%E7%BD%B2Prometheus%E5%92%8CGrafana.assets/image-20220602014027197.png)
+
+Note:
+
+Official dashboard templates: https://grafana.com/dashboards?dataSource=prometheus&search=kubernetes
+
+![image-20220602014152927](%E5%9F%BA%E4%BA%8Ekubernetes%E9%83%A8%E7%BD%B2Prometheus%E5%92%8CGrafana.assets/image-20220602014152927.png)
+
+![image-20220602014212551](%E5%9F%BA%E4%BA%8Ekubernetes%E9%83%A8%E7%BD%B2Prometheus%E5%92%8CGrafana.assets/image-20220602014212551.png)
+
+Display:
+
+![image-20220602014306247](%E5%9F%BA%E4%BA%8Ekubernetes%E9%83%A8%E7%BD%B2Prometheus%E5%92%8CGrafana.assets/image-20220602014306247.png)
+
+![image-20220602014321106](%E5%9F%BA%E4%BA%8Ekubernetes%E9%83%A8%E7%BD%B2Prometheus%E5%92%8CGrafana.assets/image-20220602014321106.png)
+
+![image-20220602014337431](%E5%9F%BA%E4%BA%8Ekubernetes%E9%83%A8%E7%BD%B2Prometheus%E5%92%8CGrafana.assets/image-20220602014337431.png)
\ No newline at end of file