The following environment is required.
Overview: deploy the Blackbox exporter (a probing tool for checking service and network availability) together with Grafana and Prometheus (a monitoring and visualization stack) on a K8s cluster, to make network connectivity visible and to support alerting and analysis.
This article combines two posts from the Rehiy blog: "Installing Blackbox on a Kubernetes Cluster to Monitor Website Status" and "Installing Grafana and Prometheus on a Kubernetes Cluster".
Make sure the master node and the worker nodes all have Docker installed (preferably the same version).
// Install Docker with the one-line installer (skip if already installed)
curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun
// Start docker and enable it at boot
systemctl start docker && systemctl enable docker
apt update
apt install -y wireguard
echo "net.ipv4.ip_forward = 1" >/etc/sysctl.d/ip_forward.conf
sysctl -p /etc/sysctl.d/ip_forward.conf
// Save the token below; any string will do
export SERVER_TOKEN=r83nui54eg8wihyiteshuo3o43gbf7u9er63o43gbf7uitujg8wihyitr6
export PUBLIC_IP=$(curl -Ls http://metadata.tencentyun.com/latest/meta-data/public-ipv4)
export PRIVATE_IP=$(curl -Ls http://metadata.tencentyun.com/latest/meta-data/local-ipv4)
export INSTALL_K3S_SKIP_DOWNLOAD=true
export DOWNLOAD_K3S_BIN_URL=https://github.com/k3s-io/k3s/releases/download/v1.28.2%2Bk3s1/k3s
if [ $(curl -Ls http://ipip.rehi.org/country_code) == "CN" ]; then
DOWNLOAD_K3S_BIN_URL=https://ghproxy.com/${DOWNLOAD_K3S_BIN_URL}
fi
curl -Lo /usr/local/bin/k3s $DOWNLOAD_K3S_BIN_URL
chmod a+x /usr/local/bin/k3s
curl -Ls https://get.k3s.io | sh -s - server \
--cluster-init \
--token $SERVER_TOKEN \
--node-ip $PRIVATE_IP \
--node-external-ip $PUBLIC_IP \
--advertise-address $PRIVATE_IP \
--service-node-port-range 5432-9876 \
--flannel-backend wireguard-native \
--flannel-external-ip
// Install Docker with the one-line installer (skip if already installed)
curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun
// Start docker and enable it at boot
systemctl start docker && systemctl enable docker
// Worker node commands
apt update
apt install -y wireguard
echo "net.ipv4.ip_forward = 1" >/etc/sysctl.d/ip_forward.conf
sysctl -p /etc/sysctl.d/ip_forward.conf
export SERVER_IP=43.129.195.33 # use your master node's IP here
export SERVER_TOKEN=r83nui54eg8wihyiteshuo3o43gbf7u9er63o43gbf7uitujg8wihyitr6
export PUBLIC_IP=$(curl -Ls http://metadata.tencentyun.com/latest/meta-data/public-ipv4)
export PRIVATE_IP=$(curl -Ls http://metadata.tencentyun.com/latest/meta-data/local-ipv4)
export INSTALL_K3S_SKIP_DOWNLOAD=true
export DOWNLOAD_K3S_BIN_URL=https://github.com/k3s-io/k3s/releases/download/v1.28.2%2Bk3s1/k3s
if [ $(curl -Ls http://ipip.rehi.org/country_code) == "CN" ]; then
DOWNLOAD_K3S_BIN_URL=https://ghproxy.com/${DOWNLOAD_K3S_BIN_URL}
fi
curl -Lo /usr/local/bin/k3s $DOWNLOAD_K3S_BIN_URL
chmod a+x /usr/local/bin/k3s
curl -Ls https://get.k3s.io | sh -s - agent \
--server https://$SERVER_IP:6443 \
--token $SERVER_TOKEN \
--node-ip $PRIVATE_IP \
--node-external-ip $PUBLIC_IP
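Back on the master (k3s bundles kubectl, so this works out of the box), the newly joined agent should show up shortly:
kubectl get nodes -o wide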
// Pull the image
docker pull rehiy/blackbox
// Run it with one command
docker run -d \
--name blackbox \
--restart always \
--publish 9115:9115 \
--env "NODE_NAME=guangzhou-taozi" \
--env "NODE_OWNER=Taozi" \
--env "NODE_REGION=广州" \
--env "NODE_ISP=TencentCloud" \
--env "NODE_BANNER=From Taozii-www.xiongan.host" \
rehiy/blackbox
// Watch the registration log
docker logs -f blackbox
On the master node create a directory (any name), then create two files inside it: grafpro.yaml and grafpro.sh.
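To confirm the exporter answers locally, its standard probe endpoint can be hit directly (module and target here are illustrative):
curl "http://127.0.0.1:9115/probe?module=http_2xx&target=https://www.example.com"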
kind: Deployment
apiVersion: apps/v1
metadata:
  name: &name grafpro
  labels:
    app: *name
spec:
  selector:
    matchLabels:
      app: *name
  template:
    metadata:
      labels:
        app: *name
    spec:
      initContainers:
        - name: busybox
          image: busybox
          command:
            - sh
            - -c
            - |
              if [ ! -f /etc/prometheus/prometheus.yml ]; then
              cat <<EOF >/etc/prometheus/prometheus.yml
              global:
                scrape_timeout: 25s
                scrape_interval: 1m
                evaluation_interval: 1m
              scrape_configs:
                - job_name: prometheus
                  static_configs:
                    - targets:
                        - 127.0.0.1:9090
              EOF
              fi
          volumeMounts:
            - name: *name
              subPath: etc
              mountPath: /etc/prometheus
      containers:
        - name: grafana
          image: grafana/grafana
          securityContext:
            runAsUser: 0
          ports:
            - containerPort: 3000
          volumeMounts:
            - name: *name
              subPath: grafana
              mountPath: /var/lib/grafana
        - name: prometheus
          image: prom/prometheus
          securityContext:
            runAsUser: 0
          ports:
            - containerPort: 9090
          volumeMounts:
            - name: *name
              subPath: etc
              mountPath: /etc/prometheus
            - name: *name
              subPath: prometheus
              mountPath: /prometheus
      volumes:
        - name: *name
          hostPath:
            path: /srv/grafpro
            type: DirectoryOrCreate
---
kind: Service
apiVersion: v1
metadata:
  name: &name grafpro
  labels:
    app: *name
spec:
  selector:
    app: *name
  ports:
    - name: grafana
      port: 3000
      targetPort: 3000
    - name: prometheus
      port: 9090
      targetPort: 9090
---
kind: Ingress
apiVersion: networking.k8s.io/v1
metadata:
  name: &name grafpro
  annotations:
    traefik.ingress.kubernetes.io/router.entrypoints: web,websecure
spec:
  rules:
    - host: grafana.example.org
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: *name
                port:
                  name: grafana
    - host: prometheus.example.org
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: *name
                port:
                  name: prometheus
  tls:
    - secretName: default
// Warning: adjust the storage path and the access domains
# Storage path
export GRAFPRO_STORAGE=${GRAFPRO_STORAGE:-"/srv/grafpro"}
# Access domains
export GRAFANA_DOMAIN=${GRAFANA_DOMAIN:-"grafana.example.org"}
export PROMETHEUS_DOMAIN=${PROMETHEUS_DOMAIN:-"prometheus.example.org"}
# Substitute the parameters and deploy
cat grafpro.yaml \
| sed "s#/srv/grafpro#$GRAFPRO_STORAGE#g" \
| sed "s#grafana.example.org#$GRAFANA_DOMAIN#g" \
| sed "s#prometheus.example.org#$PROMETHEUS_DOMAIN#g" \
| kubectl apply -f -
chmod +x grafpro.sh
./grafpro.sh
Note: ports 9115 and 9090 must be open.
Open http://grafana.example.org in a browser. The initial username and password are both admin; on first login you are prompted to change the password, after which you land in the console.
Open http://grafana.example.org/connections/datasources/, choose the first entry (Prometheus), set the URL to http://127.0.0.1:9090, and save.
Then select the newly created Prometheus data source and import a dashboard.
Open http://prometheus.example.org to view Prometheus itself.
// Back on the master node, go to /srv/grafpro/etc
Edit the yml: back up the original and create a new one.
mv prometheus.yml prometheus00.yml
// New prometheus.yml contents (if you renamed the blackbox workload at deploy time, adjust this config to match)
global:
  scrape_timeout: 15s
  scrape_interval: 1m
  evaluation_interval: 1m
scrape_configs:
  # prometheus
  - job_name: prometheus
    static_configs:
      - targets:
          - 127.0.0.1:9090
  # blackbox_all
  - job_name: blackbox_all
    static_configs:
      - targets:
          - blackbox-gz:9115
        labels:
          region: 'Guangzhou, Tencent Cloud'
  # http_status_gz
  - job_name: http_status_gz
    metrics_path: /probe
    params:
      module: [http_2xx] # GET probe expecting a 2xx response
    static_configs:
      - targets:
          - https://www.example.com
        labels:
          project: Test 1
          desc: Test site description 1
      - targets:
          - https://www.example.org
        labels:
          project: Test 2
          desc: Test site description 2
    basic_auth:
      username: ******
      password: ******
    relabel_configs:
      - target_label: region
        replacement: 'Guangzhou, Tencent Cloud'
      - source_labels: [__address__]
        target_label: __param_target
      - source_labels: [__param_target]
        target_label: instance
      - target_label: __address__
        replacement: blackbox-gz:9115
Then restart the workload so Prometheus picks up the new config. First list the pods:
kubectl get pod
Then delete the grafpro pod you find (the Deployment recreates it) and wait a few minutes:
kubectl delete pod <grafpro-pod-name>
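Deleting by label also works and saves looking up the generated pod name (the Deployment's pods carry app=grafpro per the manifest above):
kubectl delete pod -l app=grafpro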
Download the attached JSON and import it into Grafana as a dashboard.
After the import, the dashboard comes alive and displays the various metrics.
Download the Go tar.gz package; the matching version is available from the Aliyun mirror's Go page: https://mirrors.aliyun.com/golang/?spm=a2c6h.13651104.mirror-free-trial.1.75b41e57BOxyw5
Upload it to the VM and extract it into /usr/local/src.
// Extract the Go package
[root@master ~]# tar -zxf go1.18.10.linux-amd64.tar.gz -C /usr/local/src
// Add environment variables
[root@master src]# vim /etc/profile
// Append the following (the tarball unpacks into a go/ directory):
export GOROOT=/usr/local/src/go
export PATH=$PATH:$GOROOT/bin
// After saving and exiting, source the file
source /etc/profile
// Verify the installation
go version
[root@master ~]# mkdir 0607tz
[root@master ~]# cd 0607tz/
[root@master 0607tz]# vim main.go
// File contents:
package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

func statusOKHandler(c *gin.Context) {
	c.JSON(http.StatusOK, gin.H{"status": "success~welcome to study"})
}

func versionHandler(c *gin.Context) {
	c.JSON(http.StatusOK, gin.H{"version": "v1.1"})
}

func main() {
	router := gin.New()
	router.Use(gin.Recovery())
	router.GET("/", statusOKHandler)
	router.GET("/version", versionHandler)
	router.Run(":8080")
}
Initialize the module
[root@master 0607tz]# go mod init 0607tz
go: creating new go.mod: module 0607tz
go: to add module requirements and sums:
go mod tidy
// Module initialized successfully
// Set the Go module proxy
[root@master 0607tz]# go env -w GOPROXY=https://goproxy.cn,direct
[root@master 0607tz]# go mod tidy
// Build the binary
[root@master 0607tz]# CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o k8s-demo main.go
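Before containerizing, the binary can be smoke-tested locally (assuming port 8080 is free):
./k8s-demo &
curl http://127.0.0.1:8080/            # {"status":"success~welcome to study"}
curl http://127.0.0.1:8080/version     # {"version":"v1.1"}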
Write the Dockerfile
[root@master 0607tz]# vim Dockerfile
FROM alpine
ADD k8s-demo /data/app/
WORKDIR /data/app/
CMD ["/bin/sh","-c","./k8s-demo"]
Build the image
[root@master 0607tz]# docker build -t taozheng/k8sdemo:v1 .
Save the image and copy it to the k8s worker node
[root@master 0607tz]# docker save -o k8sdemo.tar.gz taozheng/k8sdemo:v1
[root@master 0607tz]# scp k8sdemo.tar.gz node:/root/
k8sdemo.tar.gz 100% 16MB 68.0MB/s 00:00
// Load the image on the node (see the command sketch below)
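The load command itself isn't shown in the original; on the node it would presumably be:
[root@node ~]# docker load -i k8sdemo.tar.gz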
[root@master 0607tz]# vim k8s.yaml
//k8s.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: k8s-demo
  namespace: default
  labels:
    app: k8s-demo
    cy: taozheng
spec:
  selector:
    matchLabels:
      app: k8s-demo
  replicas: 4
  template:
    metadata:
      labels:
        app: k8s-demo
    spec:
      containers:
        - image: taozheng/k8sdemo:v1
          imagePullPolicy: IfNotPresent
          name: k8s-demo
          ports:
            - containerPort: 8080
              protocol: TCP
          resources:
            limits:
              cpu: 100m
              memory: 100Mi
            requests:
              cpu: 50m
              memory: 50Mi
          livenessProbe:
            tcpSocket:
              port: 8080
            initialDelaySeconds: 10
            timeoutSeconds: 3
          readinessProbe:
            httpGet:
              path: /
              port: 8080
            initialDelaySeconds: 10
            timeoutSeconds: 2
Create a Service for the Go app
[root@master 0607tz]# vim gosvc.yaml
kind: Service
apiVersion: v1
metadata:
  name: k8s-demo-svc
  namespace: default
  labels:
    app: k8s-demo
    cy: taozheng
spec:
  ports:
    - name: api
      port: 8080
      protocol: TCP
      targetPort: 8080
  selector:
    app: k8s-demo
Deploy to k8s
Check the pod and service information (shown as screenshots in the original; see the command sketch below)
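The corresponding commands are along these lines (filenames per the files created above):
[root@master 0607tz]# kubectl apply -f k8s.yaml
[root@master 0607tz]# kubectl apply -f gosvc.yaml
[root@master 0607tz]# kubectl get pod,svc -o wide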
Change the svc type to NodePort
[root@master 0607tz]# kubectl edit svc k8s-demo-svc
After saving, check again; the change has been applied.
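For a scriptable alternative to interactive editing, a patch does the same thing (a sketch):
kubectl patch svc k8s-demo-svc -p '{"spec":{"type":"NodePort"}}'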
Check the svc labels
Test access from a browser:
First set up passwordless SSH: generate a key pair on each of the three VMs.
// Run on all three machines
ssh-keygen -t rsa
// Then run the following on all three machines to distribute the keys
[root@tz1-123 ~]# ssh-copy-id tz1-123
[root@tz1-123 ~]# ssh-copy-id tz2-123
[root@tz1-123 ~]# ssh-copy-id tz3-123
After adjusting the firewall settings, check the firewall status on all three VMs.
// On all three: disable the firewall at boot, effective immediately
systemctl disable firewalld --now
Install the JDK (all 3 machines): first upload the package to the root home directory, then extract it into /usr/lib/jvm.
// Add to /etc/profile:
JAVA_HOME=/usr/lib/jvm/jdk1.8.0_152
JRE_HOME=$JAVA_HOME/jre
CLASS_PATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar:$JRE_HOME/lib
PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin
export JAVA_HOME JRE_HOME CLASS_PATH PATH
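After saving, a quick check confirms the JDK is on the PATH (version string per the JDK installed above):
source /etc/profile
java -version    # should report java version "1.8.0_152"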
Upload the Hadoop package to /root and extract it into /usr/local/src/.
Edit core-site.xml
// Add the following between <configuration> and </configuration>
<!-- HDFS address -->
<property>
  <name>fs.defaultFS</name>
  <value>hdfs://tz1-123:9000</value>
</property>
<!-- Storage directory for files Hadoop generates at runtime -->
<property>
  <name>hadoop.tmp.dir</name>
  <value>/usr/local/src/hadoop/data/tmp</value>
</property>
Edit hadoop-env.sh
// Append this line
export JAVA_HOME=/usr/lib/jvm/jdk1.8.0_152
Edit hdfs-site.xml
// Add the following between <configuration> and </configuration>
<!-- Number of replicas -->
<property>
  <name>dfs.replication</name>
  <value>3</value>
</property>
<!-- HTTP address of the SecondaryNameNode -->
<property>
  <name>dfs.namenode.secondary.http-address</name>
  <value>tz3-123:50090</value>
</property>
Edit yarn-env.sh
// Append this line
export JAVA_HOME=/usr/lib/jvm/jdk1.8.0_152
Edit yarn-site.xml
// Add the following between <configuration> and </configuration>
<!-- How reducers fetch data -->
<property>
  <name>yarn.nodemanager.aux-services</name>
  <value>mapreduce_shuffle</value>
</property>
<!-- The address of the YARN ResourceManager -->
<property>
  <name>yarn.resourcemanager.hostname</name>
  <value>tz2-123</value>
</property>
Edit mapred-env.sh
// Append this line
export JAVA_HOME=/usr/lib/jvm/jdk1.8.0_152
Edit mapred-site.xml
[root@tz1-123 hadoop]# cp mapred-site.xml.template mapred-site.xml
[root@tz1-123 hadoop]# vim mapred-site.xml
// Add the following between <configuration> and </configuration>
<!-- Run MapReduce on YARN -->
<property>
  <name>mapreduce.framework.name</name>
  <value>yarn</value>
</property>
Edit slaves
[root@tz1-123 hadoop]# vim slaves
tz1-123
tz2-123
tz3-123
Distribute the Hadoop package
[root@tz1-123 hadoop]# scp -r /usr/local/src/hadoop tz2-123:/usr/local/src/
[root@tz1-123 hadoop]# scp -r /usr/local/src/hadoop tz3-123:/usr/local/src/
Edit /etc/profile
// Append the following
export HADOOP_HOME=/usr/local/src/hadoop
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
// After editing, source it and distribute it to the other nodes
[root@tz1-123 hadoop]# source /etc/profile
[root@tz1-123 hadoop]# scp /etc/profile tz2-123:/etc/
[root@tz1-123 hadoop]# scp /etc/profile tz3-123:/etc/
Format the namenode on tz1-123 (the master)
hdfs namenode -format
Start the cluster and test
[hadoop@tz1-123 ~]$ start-dfs.sh
[hadoop@tz2-123 ~]$ start-yarn.sh
[root@tz1-123 hadoop]# jps
8096 NameNode
24690 NodeManager
24882 Jps
8293 DataNode
[root@tz2-123 ~]# jps
30709 NodeManager
24086 DataNode
30567 ResourceManager
781 Jps
[root@tz3-123 ~]# jps
23988 DataNode
604 Jps
30494 NodeManager
HDFS shell operations begin with hadoop fs or hdfs dfs.
# List a directory
hadoop fs -ls <HDFS path>
# Create a directory on HDFS
hadoop fs -mkdir <HDFS directory>
# Create a directory on HDFS (parent directories may not exist)
hadoop fs -mkdir -p <HDFS directory>
# Upload a local file to HDFS
hadoop fs -put <local path> <HDFS path>
# Print a file's contents
hadoop fs -cat <HDFS file>
# Download a file from HDFS to the local filesystem
hadoop fs -get <HDFS file> <local path>
# Remove an empty directory on HDFS
hadoop fs -rmdir <HDFS directory>
# Remove a non-empty directory on HDFS
hadoop fs -rm -r <HDFS directory>
# Remove a file on HDFS
hadoop fs -rm <HDFS file>
# Move a path to another path on HDFS
hadoop fs -mv <HDFS source> <HDFS destination>
# Copy a path to another path on HDFS
hadoop fs -cp <HDFS source> <HDFS destination>
# Create an empty file on HDFS
hadoop fs -touchz <HDFS path>
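A quick end-to-end run of a few of these commands (paths here are illustrative):
hadoop fs -mkdir -p /demo
hadoop fs -put /etc/hosts /demo/hosts.txt
hadoop fs -cat /demo/hosts.txt
hadoop fs -rm -r /demo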
A /tmp/healthy file is created inside the container; the pod comes up successfully, but continued monitoring of its status shows it restarting repeatedly.
Using the describe command to view the detailed pod information shows everything normal.
Create the yaml
Run it and check the status
View the detailed events
Create the yaml file (a reconstructed manifest follows below)
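The manifest appears only as a screenshot in the original; a plausible reconstruction, based on the /tmp/healthy behavior described above (the classic Kubernetes liveness-exec example), is:
apiVersion: v1
kind: Pod
metadata:
  name: liveness-exec
spec:
  containers:
    - name: liveness
      image: busybox
      args:
        - /bin/sh
        - -c
        - touch /tmp/healthy; sleep 30; rm -f /tmp/healthy; sleep 600
      livenessProbe:
        exec:
          command: ["cat", "/tmp/healthy"]
        initialDelaySeconds: 5
        periodSeconds: 5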
Run it and operate inside the container
Check the pod's RESTARTS count
Check the records of the pod previously failing its liveness probe
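The next experiment is shown only as screenshots; a plausible reconstruction is an httpd Deployment behind a Service (4 replicas, matching the four endpoint addresses mentioned below) with an HTTP readiness probe on index.html (names here are illustrative):
apiVersion: apps/v1
kind: Deployment
metadata:
  name: httpd-readiness
spec:
  replicas: 4
  selector:
    matchLabels:
      app: httpd-readiness
  template:
    metadata:
      labels:
        app: httpd-readiness
    spec:
      containers:
        - name: httpd
          image: httpd
          readinessProbe:
            httpGet:
              path: /index.html
              port: 80
            periodSeconds: 5
---
apiVersion: v1
kind: Service
metadata:
  name: httpd-readiness
spec:
  selector:
    app: httpd-readiness
  ports:
    - port: 80
      targetPort: 80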
Run the deployment
Use describe to view the HTTP service's endpoints
Four addresses are visible
Enter one of the containers and delete its index.html
Run describe on the endpoints again
The deleted pod's address has been removed from the endpoints
The pod's detailed information shows it failing the probe
The pod listing shows it in a NotReady state
Enter the directory where the lab files are stored,
and create a yaml that uses multiple labels.
[root@master tz123]# cd /root/tz123/labfile/labelfile
[root@master labelfile]# vim labelpod.yaml
kind: Pod
apiVersion: v1
metadata:
  name: labelpod
  labels:
    app: busybox
    version: new
spec:
  containers:
    - name: labelpod
      image: busybox
      args:
        - /bin/sh
        - -c
        - sleep 30000
Create the Pod and view its labels
[root@master labelfile]# kubectl apply -f labelpod.yaml
pod/labelpod created
[root@master labelfile]# kubectl get pod --show-labels
NAME READY STATUS RESTARTS AGE LABELS
labelpod 1/1 Running 0 11s app=busybox,version=new
Add a new label to the pod
[root@master labelfile]# kubectl label pod labelpod time=2019
pod/labelpod labeled
[root@master labelfile]# kubectl get pod --show-labels
NAME READY STATUS RESTARTS AGE LABELS
labelpod 1/1 Running 0 69s app=busybox,time=2019,version=new
Create a new yaml
[root@master labelfile]# vim labelpod2.yaml
kind: Pod
apiVersion: v1
metadata:
  name: labelpod2
  labels:
    app: httpd
    version: new
spec:
  containers:
    - name: httpd
      image: httpd
Create and inspect the new labelpod2
[root@master labelfile]# kubectl apply -f labelpod2.yaml
[root@master labelfile]# kubectl get pod --show-labels
NAME READY STATUS RESTARTS AGE LABELS
labelpod 1/1 Running 0 12m app=busybox,time=2019,version=new
labelpod2 0/1 ContainerCreating 0 23s app=httpd,version=new
Use an equality-based label selector
[root@master labelfile]# kubectl get pod -l app=httpd
NAME READY STATUS RESTARTS AGE
labelpod2 1/1 Running 0 100s
or
[root@master labelfile]# kubectl get pod -l app==httpd
NAME READY STATUS RESTARTS AGE
labelpod2 1/1 Running 0 114s
Use an inequality-based label selector, and list the value each pod has for a given label key
[root@master labelfile]# kubectl get pod -l app!=httpd
NAME READY STATUS RESTARTS AGE
labelpod 1/1 Running 0 14m
[root@master labelfile]# kubectl get pod -L app
NAME READY STATUS RESTARTS AGE APP
labelpod 1/1 Running 0 15m busybox
labelpod2 1/1 Running 0 3m5s httpd
Label node 1 and verify
[root@master labelfile]# kubectl label node node env=test
node/node labeled
[root@master labelfile]# kubectl get node -L env
NAME STATUS ROLES AGE VERSION ENV
master Ready control-plane,master 91d v1.20.6
node Ready <none> 91d v1.20.6 test
Use nodeSelector to steer scheduling; create a new yaml file
[root@master labelfile]# vim nsdeploy.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nginx-dy
  labels:
    app: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: nginx:1.7.9
          ports:
            - containerPort: 80
      nodeSelector:
        env: test
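Apply it before checking placement:
[root@master labelfile]# kubectl apply -f nsdeploy.yaml
deployment.apps/nginx-dy created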
Check where the deployment's pods landed
[root@master labelfile]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
labelpod 1/1 Running 0 28m 10.244.167.145 node <none> <none>
labelpod2 1/1 Running 0 15m 10.244.167.146 node <none> <none>
nginx-dy-6dd6c76bcb-667ss 1/1 Running 0 5m19s 10.244.167.148 node <none> <none>
nginx-dy-6dd6c76bcb-q8tqh 1/1 Running 0 5m19s 10.244.167.149 node <none> <none>
nginx-dy-6dd6c76bcb-xc9h7 1/1 Running 0 5m19s 10.244.167.147 node <none> <none>
Use node affinity for scheduling; create a new yaml file, nadeploy2.yaml
[root@master labelfile]# vim nadeploy2.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
  name: httpd-dy
  labels:
    app: httpd
spec:
  replicas: 3
  selector:
    matchLabels:
      app: httpd
  template:
    metadata:
      labels:
        app: httpd
    spec:
      containers:
        - name: httpd
          image: httpd
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: env
                    operator: In
                    values:
                      - test
Create the deployment and check pod placement; all three pods land on node
[root@master labelfile]# kubectl apply -f nadeploy2.yaml
deployment.apps/httpd-dy created
[root@master labelfile]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
httpd-dy-5b4bb9646-g4jzb 1/1 Running 0 33s 10.244.167.150 node <none> <none>
httpd-dy-5b4bb9646-lb876 1/1 Running 0 33s 10.244.167.151 node <none> <none>
httpd-dy-5b4bb9646-q7zcm 0/1 ContainerCreating 0 33s <none> node <none> <none>
labelpod 1/1 Running 0 38m 10.244.167.145 node <none> <none>
labelpod2 1/1 Running 0 26m 10.244.167.146 node <none> <none>
nginx-dy-6dd6c76bcb-667ss 1/1 Running 0 15m 10.244.167.148 node <none> <none>
nginx-dy-6dd6c76bcb-q8tqh 1/1 Running 0 15m 10.244.167.149 node <none> <none>
nginx-dy-6dd6c76bcb-xc9h7 1/1 Running 0 15m 10.244.167.147 node <none> <none>
Exercise: use the nginx image with 5 replicas;
the deployment's pods must not land on node (see the apply/verify sketch after the manifest).
[root@master labelfile]# vim shixun01.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nginx-dy
  labels:
    app: nginx
spec:
  replicas: 5
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: nginx
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: env
                    operator: NotIn   # NotIn is one word
                    values:
                      - test          # node carries env=test, so excluding that value keeps pods off it
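Applying it and checking placement (a sketch; since the only labeled worker carries env=test, these pods should be kept off it and may stay Pending if no other schedulable node exists):
[root@master labelfile]# kubectl apply -f shixun01.yaml
[root@master labelfile]# kubectl get pod -o wide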
First label the coredns, kube-proxy, and dashboard pods
[root@master labelfile]# kubectl label -n kube-system pod kube-proxy-kj8j5 app=kubeproxy
[root@master labelfile]# kubectl label -n kube-system pod coredns-7f89b7bc75-n224r app=coredns
Find pods by keyword
Search for the dashboard pod (command sketches below)
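The searches themselves appear as screenshots in the original; with the labels applied above they would look something like:
kubectl get pod -n kube-system -l app=coredns
kubectl get pod -n kube-system -l app=kubeproxy
kubectl get pod -A | grep dashboard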
First upload the MySQL rpm bundle to /opt/software on the host.
// Unzip the bundle into the current directory
[root@master-tz software]# unzip mysql-5.7.18.zip
// Enter the rpm directory; check for mariadb first and remove it if present
// Install the database
[root@master-tz mysql-5.7.18]# rpm -ivh mysql-community-common-5.7.18-1.el7.x86_64.rpm
[root@master-tz mysql-5.7.18]# rpm -ivh mysql-community-libs-5.7.18-1.el7.x86_64.rpm
[root@master-tz mysql-5.7.18]# rpm -ivh mysql-community-client-5.7.18-1.el7.x86_64.rpm
// The next rpm requires perl; install it (and net-tools) first
[root@master-tz mysql-5.7.18]# yum install -y net-tools perl
[root@master-tz mysql-5.7.18]# rpm -ivh mysql-community-server-5.7.18-1.el7.x86_64.rpm
Edit the configuration file /etc/my.cnf
vim /etc/my.cnf
// Append these five lines
default-storage-engine=innodb
innodb_file_per_table
collation-server=utf8_general_ci
init-connect='SET NAMES utf8'
character-set-server=utf8
// Save and exit
[root@master-tz mysql-5.7.18]# systemctl start mysqld
[root@master-tz mysql-5.7.18]# systemctl status mysqld
[root@master-tz mysql-5.7.18]# cat /var/log/mysqld.log | grep password
2023-03-27T08:52:43.074230Z 1 [Note] A temporary password is generated for root@localhost: KbVXiHlul3:> // the initial password; needed below
[root@master-tz mysql-5.7.18]# mysql_secure_installation // reset the password; set it to Password123$
// Note: answer n to the prompt about disallowing remote root login (this keeps remote connections allowed);
answer y to everything else
[root@master-tz mysql-5.7.18]# mysql -uroot -pPassword123$
mysql> create database hive_db;
mysql> create user hive identified by 'Password123$';
mysql> grant all privileges on *.* to hive@'%' identified by 'Password123$' with grant option;
mysql> grant all privileges on *.* to 'root'@'%' identified by 'Password123$' with grant option;
mysql> flush privileges;
Upload the Hive package to the VM, extract it, rename it to hive, and set its ownership
[root@master-tz ~]# tar -zxf apache-hive-2.0.0-bin.tar.gz -C /usr/local/src/
[root@master-tz ~]# cd /usr/local/src/
[root@master-tz src]# mv apache-hive-2.0.0-bin/ hive
[root@master-tz src]# chown -R hadoop:hadoop hive/
[root@master-tz src]# vim /etc/profile
# set Hive environment
export HIVE_HOME=/usr/local/src/hive   # Hive install directory
export PATH=$HIVE_HOME/bin:$PATH       # add Hive's bin directory to PATH
export HIVE_CONF_DIR=$HIVE_HOME/conf   # Hive config directory
[root@master-tz src]# source /etc/profile
First switch to the hadoop user
[hadoop@master-tz conf]$ cd /usr/local/src/hive/conf
[hadoop@master-tz conf]$ vim hive-site.xml
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
  <!-- Metastore database URL -->
  <property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:mysql://master-tz:3306/hive_db?createDatabaseIfNotExist=true</value>
  </property>
  <!-- MySQL username -->
  <property>
    <name>javax.jdo.option.ConnectionUserName</name>
    <value>hive</value>
  </property>
  <!-- Password of the hive user in MySQL -->
  <property>
    <name>javax.jdo.option.ConnectionPassword</name>
    <value>Password123$</value>
  </property>
  <!-- MySQL driver -->
  <property>
    <name>javax.jdo.option.ConnectionDriverName</name>
    <value>com.mysql.jdbc.Driver</value>
  </property>
  <property>
    <name>hive.downloaded.resources.dir</name>
    <value>/usr/local/src/hive/tmp</value>
  </property>
  <property>
    <name>hive.exec.local.scratchdir</name>
    <value>/usr/local/src/hive/tmp/${hive.session.id}_resources</value>
  </property>
  <property>
    <name>hive.querylog.location</name>
    <value>/usr/local/src/hive/tmp</value>
  </property>
  <property>
    <name>hive.server2.logging.operation.log.location</name>
    <value>/usr/local/src/hive/tmp/operation_logs</value>
  </property>
  <property>
    <name>hive.server2.webui.host</name>
    <value>master-tz</value>
  </property>
  <property>
    <name>hive.server2.webui.port</name>
    <value>10002</value>
  </property>
</configuration>
[hadoop@master-tz conf]$ cp hive-env.sh.template hive-env.sh
[hadoop@master-tz conf]$ vim hive-env.sh
// Add the following entries
# Set JAVA
export JAVA_HOME=/usr/local/src/java
# Set HADOOP_HOME to point to a specific hadoop install directory
export HADOOP_HOME=/usr/local/src/hadoop
# Hive Configuration Directory can be controlled by:
export HIVE_CONF_DIR=/usr/local/src/hive/conf
# Folder containing extra libraries required for hive compilation/execution can be controlled by:
export HIVE_AUX_JARS_PATH=/usr/local/src/hive/lib
Upload the MySQL JDBC driver jar to the VM, then copy it into Hive's lib directory
[root@master-tz software]# cp mysql-connector-java-5.1.46.jar /usr/local/src/hive/lib/
[hadoop@master-tz conf]$ schematool -initSchema -dbType mysql
[hadoop@master-tz ~]$ hive
hive>
If the connection fails with an SSL-related warning,
edit hive-site.xml and change the value to the following (& must be escaped as &amp; in XML):
<value>jdbc:mysql://master-tz:3306/hive_db?createDatabaseIfNotExist=true&amp;useSSL=false</value>
Node 1 (master): 192.168.123.200 master
Node 2 (worker): 192.168.123.201 slave
The following files must be downloaded separately.
Cloud drive link
[root@master ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.123.200 master-tz
192.168.123.201 slave01-tz
1. systemctl disable firewalld --now
2. setenforce 0
3. In /etc/selinux/config, change the line to SELINUX=disabled
4. swapoff -a
5. Comment out the swap line in /etc/fstab
#/dev/mapper/centos-swap swap swap defaults 0 0
[root@master ~]# modprobe br_netfilter
[root@master ~]# echo "modprobe br_netfilter" >> /etc/profi
le
[root@master ~]# cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
[root@master ~]# sysctl -p /etc/sysctl.d/k8s.conf
yum -y install wget vim ntpdate
Configure time synchronization
ntpdate ntp1.aliyun.com
[root@master ~]# rm -rf /etc/yum.repos.d/*
[root@master ~]# wget -O /etc/yum.repos.d/epel.repo https://mirrors.aliyun.com/repo/epel-7.repo
[root@master ~]# wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
[root@master ~]# wget -O kubernetes.sh https://www.xiongan.host/sh/kubernetes.sh && sh kubernetes.sh
chmod 755 /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules
lsmod | grep ip_vs
yum install -y kubelet-1.20.6 kubeadm-1.20.6 kubectl-1.20.6
systemctl enable kubelet
Note: the Docker version is 20.10.8
Note: what each package does
kubeadm: the tool that initializes the k8s cluster
kubelet: installed on every node in the cluster; responsible for starting Pods
kubectl: deploys and manages applications, inspects resources, and creates/deletes/updates components
Upload k8simage-1-20-6.tar.gz to both nodes
docker load -i k8simage-1-20-6.tar.gz
[root@master ~]# kubeadm init --kubernetes-version=1.20.6 --apiserver-advertise-address=192.168.123.200 --image-repository registry.aliyuncs.com/google_containers --pod-network-cidr=10.244.0.0/16 --ignore-preflight-errors=SystemVerification
--kubernetes-version: the k8s version to install
--apiserver-advertise-address: must be set if the master node has more than one NIC
--pod-network-cidr: the pod network range
--image-repository registry.aliyuncs.com/google_containers: sets the image repository manually; kubeadm pulls from k8s.gcr.io by default, which is unreachable, so images are pulled from registry.aliyuncs.com/google_containers instead.
Set up kubectl's config file; this effectively authorizes kubectl, letting it manage the k8s cluster with this certificate:
[root@master ~]# mkdir -p $HOME/.kube
[root@master ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
On the slave node, run the join command shown above to add it to the cluster:
[root@slave01-tz ~]# kubeadm join 192.168.123.200:6443 --token d32tmx.utjgdkqxhy9sk517 \
> --discovery-token-ca-cert-hash sha256:d6a0bb61368c23be10444d7a18eab071b750c97c45186020980714fd57b13bdd
Check the nodes on the master again
[root@master-tz ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master-tz NotReady control-plane,master 8m31s v1.20.6
slave01-tz NotReady <none> 12s v1.20.6
At this point the cluster is still NotReady because no network plugin is installed.
To add more nodes to the cluster (master node), run on the master:
kubeadm token create --print-join-command
and execute the printed command on the new node.
Install the Calico network plugin (master node):
Upload the calico.yaml file to the master node.
[root@master ~]# kubectl apply -f calico.yaml
Run kubectl get nodes again: the node status is now Ready.
Upload dashboard_2_0_0.tar.gz and metrics-scrapter-1-0-1.tar.gz to both nodes.
Upload kubernetes-dashboard.yaml to the master node.
[root@master ~]# docker load -i dashboard_2_0_0.tar.gz
[root@master ~]# docker load -i metrics-scrapter-1-0-1.tar.gz
[root@master ~]# kubectl apply -f kubernetes-dashboard.yaml
[root@master-tz ~]# kubectl get pods -n kubernetes-dashboard
NAME READY STATUS RESTARTS AGE
dashboard-metrics-scraper-7445d59dfd-p572g 1/1 Running 0 10s
kubernetes-dashboard-54f5b6dc4b-5zxpm 1/1 Running 0 10s
This shows the dashboard installed successfully.
Check the dashboard's service
[root@master-tz ~]# kubectl get svc -n kubernetes-dashboard
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
dashboard-metrics-scraper ClusterIP 10.101.56.238 <none> 8000/TCP 97s
kubernetes-dashboard ClusterIP 10.97.126.230 <none> 443/TCP 97s
Change the service type to NodePort
[root@master ~]# kubectl edit svc kubernetes-dashboard -n kubernetes-dashboard
[root@master ~]# kubectl get svc -n kubernetes-dashboard
Access it in a browser: https://192.168.123.200:30245
Log in to the dashboard with a token (master node)
Create an admin token with permission to view any namespace and manage all resource objects.
[root@master-tz ~]# kubectl create clusterrolebinding dashboard-cluster-admin --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:kubernetes-dashboard
clusterrolebinding.rbac.authorization.k8s.io/dashboard-cluster-admin created
[root@master-tz ~]# kubectl get secret -n kubernetes-dashboard
NAME TYPE DATA AGE
default-token-scvqs kubernetes.io/service-account-token 3 14m
kubernetes-dashboard-certs Opaque 0 14m
kubernetes-dashboard-csrf Opaque 1 14m
kubernetes-dashboard-key-holder Opaque 2 14m
kubernetes-dashboard-token-bs98s kubernetes.io/service-account-token 3 14m
[root@master-tz ~]# kubectl describe secret kubernetes-dashboard-token-bs98s -n kubernetes-dashboard
Name: kubernetes-dashboard-token-bs98s
Namespace: kubernetes-dashboard
Labels: <none>
Annotations: kubernetes.io/service-account.name: kubernetes-dashboard
kubernetes.io/service-account.uid: d0842b14-e79e-4129-b6e3-bfd3c7039334
Type: kubernetes.io/service-account-token
Data
====
ca.crt: 1066 bytes
namespace: 20 bytes
token: eyJhbGciOiJSUzI1NiIsImtpZCI6IkdHTy1lQ2tndl9qQ29INUtEMEREMW1iUWhWeENOODB1Q2lOOERSYnN6OTQifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC10b2tlbi1iczk4cyIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6ImQwODQyYjE0LWU3OWUtNDEyOS1iNmUzLWJmZDNjNzAzOTMzNCIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDprdWJlcm5ldGVzLWRhc2hib2FyZCJ9.f6iGY-QbB5YQFuaTkU6qR9UBTbFiIcDbpgT40E_ceQZGh3kdWyKzeTB-pWUkrJV1gWFaQt3Er7_brB-T7juO8eywunXkE6Xd_xH7XzaiWbNYFYfr3gMMXI8SmbnpqDKHclqw_tUIgun37ao7YYY_22_mYDdcTSIVFvx9XehK48eJWVfdyy-snuZiTKoR2pKMH0Rau3oXKlw7is8bV7yezeucZnaMPa60N-1KIMAvRM7gXlMX9m_BKiqvxEoru-2FDEoOkiCFXV-juGclxM_Qtn70i9R2JVjPgE5VX_gP7RFHDoXIEwykyjJqOg2fguE9Vy8nKnrfOo0c99aGXxnW_g
Log in with the token value.
Pull the nginx image
docker pull nginx
Create the nginx deployment
kubectl create deployment ngix-deployment1 --image nginx --port=80 --replicas=2
Create the service
kubectl expose deployment ngix-deployment1 --name=nginx --port=80 --target-port=80 --type=NodePort
Access the nginx service
[root@master-tz ~]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 109m
nginx NodePort 10.110.242.218 <none> 80:31079/TCP 9s
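With the NodePort shown above (31079 here; the port is assigned per cluster), nginx is reachable through any node IP:
curl http://192.168.123.200:31079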
Docker Swarm is Docker's native clustering tool, so no extra orchestration software is needed to create or manage a cluster. It is simpler to deploy and suits smaller application environments, especially simple and rapid development.
Docker Client: client; Swarm Manager: manager node; Scheduler: scheduler;
Discovery Service: service discovery; Swarm Node: worker node; Docker Containers: containers
Cluster management commands
docker swarm ca: display and rotate the root CA.
docker swarm init: initialize a cluster.
docker swarm join: join a cluster as a node.
docker swarm join-token: manage cluster join tokens.
docker swarm leave: leave the cluster.
docker swarm unlock: unlock the cluster.
docker swarm unlock-key: manage the unlock key.
docker swarm update: update the cluster.
Node management commands
docker node demote: demote one or more manager nodes to workers.
docker node inspect: show detailed information on one or more nodes.
docker node ls: list the nodes in the Swarm cluster.
docker node promote: promote one or more nodes to managers.
docker node ps: list the tasks running on one or more nodes (default: the current node).
docker node rm: remove one or more nodes from the Swarm cluster.
docker node update: update node options such as availability, labels, or role.
Hostname | IP | Role |
---|---|---|
Manager | 192.168.123.100 | manager |
Worker01 | 192.168.123.101 | worker 01 |
Worker02 | 192.168.123.102 | worker 02 |
On the manager node, run the following to obtain the join command (including the token) for worker nodes:
docker swarm init --advertise-addr 192.168.123.100
Run the command produced above on the worker01 and worker02 hosts.
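The printed command has this general shape (the token is a placeholder; 2377 is Swarm's default cluster-management port):
docker swarm join --token <worker-token> 192.168.123.100:2377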
docker node list
Ansible is a relatively recent automation tool. Built on Python, it combines the strengths of many established ops tools (Puppet, CFEngine, Chef, Func, Fabric) and provides batch system configuration, batch program deployment, and batch command execution. Its architecture is fairly simple: it only needs to connect to the managed hosts over SSH to run tasks.
Hostname | IP | Role |
---|---|---|
Server | 192.168.123.195 | control node |
Backend01 | 192.168.123.196 | managed node 01 |
Backend02 | 192.168.123.197 | managed node 02 |
The Aliyun repository is used here.
wget -O /etc/yum.repos.d/epel.repo https://mirrors.aliyun.com/repo/epel-7.repo
With that in place, install the Ansible service
yum install -y ansible openssh-clients
Edit the configuration file
vim /etc/ansible/ansible.cfg
// Around line 71, uncomment this to disable SSH host-key checking for hosts not yet known
host_key_checking = False
mv /etc/ansible/hosts /etc/ansible/hosts.bak ## back up the original first
vi /etc/ansible/hosts ## define a target group and list the managed hosts (IP/FQDN)
[tz1101]
backend01
backend02
ansible tz1101 -m ping
Configure passwordless SSH login
ssh-keygen
Press Enter through all prompts for default passwordless communication
ssh-copy-id <ip or hostname>
to send the key to the cluster hosts.
The hostnames and IPs also need to be written into /etc/hosts.
Add a dedicated user on the cluster hosts
ansible tz1101 -m user -a 'name=tao state=present'
After creating the user, set a password. On the server, use Python (via pip) to generate a hashed password.
First install pip for Python
yum install python-pip -y
Generate the password hash
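The hashing command itself isn't shown in the original; one way to produce a crypt-compatible hash for the user module is a short passlib one-liner (a sketch; replace the password):
pip install passlib
python -c "from passlib.hash import sha512_crypt; print(sha512_crypt.hash('YourPassword'))"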
ansible tz1101 -m user -a 'name=tao password=tarRU/F9EJjRU update_password=always'
Check the result.
Test the cluster with a playbook that installs a package
vim playbook_create_install.yml # write the playbook file → install software
- hosts: tz1101                                       # cluster group name
  tasks:
    - name: install vsftpd                            ## task name
      yum: name=vsftpd state=installed                ## install vsftpd
    - name: running and enabled
      service: name=vsftpd state=started enabled=yes  ## start it and enable it at boot
Create a yml with tags (a sketch follows below)
Note: with a custom tag defined, only the tasks tagged test2 run; everything else is skipped.
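The tagged playbook appears as a screenshot in the original; a minimal sketch of the idea (package names are illustrative):
- hosts: tz1101
  tasks:
    - name: install httpd
      yum: name=httpd state=installed
      tags: test1
    - name: install vsftpd
      yum: name=vsftpd state=installed
      tags: test2
Running only the test2 tasks:
ansible-playbook tag.yml --tags test2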
Install packages using a custom variable list
- hosts: tz1101
  become: yes
  become_method: sudo
  tasks:
    - name: install packages
      yum: name={{ item }} state=installed
      with_items:
        - vim-enhanced   ## package names
        - wget
        - unzip
      tags: Taozheng
ansible-playbook bianliang.yml
On the internal network, the firewall and selinux need to be off
ansible tz1101 -a "setenforce 0 && systemctl stop firewalld"
Install tree on the current server host
yum install -y tree
The hosts in the group need tree installed as well
ansible tz1101 -m yum -a "name=tree state=installed"
// -m selects a module (yum here); -a passes its arguments: name is the package, state=installed installs it
Create the roles subdirectories and their contents
mkdir -p roles/ins_httpd/{files,tasks,vars}
Back above roles, write a yml file
vim playbook_httpd.yml
- hosts: tz1101
  roles:
    - ins_httpd
Edit the main file under vars
vim roles/ins_httpd/vars/main.yml
packages:
  - httpd
Create the task playbook
Edit the main file under tasks
vim roles/ins_httpd/tasks/main.yml
- name: httpd is installed
  yum: name=httpd state=installed
  tags: install_httpd
- name: edit httpd.conf
  lineinfile: >
    dest=/etc/httpd/conf/httpd.conf
    regexp="{{item.regexp}}"
    line="{{item.line}}"
  with_items:
    - { regexp: "^#ServerName", line: "ServerName {{ansible_fqdn}}:80" }
  tags: edit_httpd.conf
- name: httpd is running and enabled
  service: name=httpd state=started enabled=yes
- name: put index.html
  copy: src=index.html dest=/var/www/html owner=root group=root mode=0644
Run the playbook
ansible-playbook playbook_httpd.yml
Check the index.html served by httpd
ansible tz1101 -m shell -a "curl localhost"
yum install -y pssh
Both VMs need their hosts file edited,
adding the IPs and hostnames of node01 and node02.
Generate keys for passwordless login, so node01 and node02 can be reached without a password
ssh-keygen -t rsa
ssh-copy-id -i /root/.ssh/id_rsa.pub root@node02-tz
pssh -h host-list.txt -i 'yum install pacemaker pcs -y'
pssh -h host-list.txt -i 'systemctl enable --now pcsd'
Set the password for the hacluster account
pssh -h host-list.txt -i 'echo 123456 | passwd --stdin hacluster'
pssh -h host-list.txt -i 'firewall-cmd --add-service=high-availability --permanent'
This allows the high-availability service.
pssh -h host-list.txt -i 'firewall-cmd --reload'
This reloads the firewall.
pcs cluster auth node01-tz node02-tz
pcs cluster setup --name tz-cluster node01-tz node02-tz
pcs cluster start --all
pcs cluster enable --all
pcs status cluster
Install httpd on both nodes; do not start it
pssh -h host-list.txt -i 'yum install httpd -y'
vim /etc/httpd/conf.d/server_status.conf
ExtendedStatus On
<Location /server-status>
SetHandler server-status
Require local
</Location>
Also write a test page to /var/www/html/index.html
[root@node01-tz ~]# pcs property set stonith-enabled=false <== disable stonith
[root@node01-tz ~]# pcs property set no-quorum-policy=ignore <== ignore loss of quorum
[root@node01-tz ~]# pcs property set default-resource-stickiness="INFINITY" <== pin resources to their current node
[root@node01-tz ~]# pcs property list <== show the settings
pcs resource create VIP ocf:heartbeat:IPaddr2 ip=192.168.123.111 cidr_netmask=24 op monitor interval=30s <== create the VIP resource
pcs status resources <== show resource status
Add the httpd resource:
pcs resource create Web123-Cluster ocf:heartbeat:apache configfile=/etc/httpd/conf/httpd.conf statusurl="http://127.0.0.1/server-status" op monitor interval=1min
[root@node01-tz ~]# pcs constraint colocation add Web123-Cluster with VIP INFINITY
[root@node01-tz ~]# pcs constraint order VIP then Web123-Cluster <== start order: VIP first, then Web123-Cluster
pcs constraint <== show the constraints
Stop the cluster on node01 and test from a client again:
pcs cluster stop node01-tz
The resources fail over to node02 automatically.
firewall-cmd --add-port=2224/tcp --permanent
firewall-cmd --reload
Visit https://192.168.123.111:2224
① If port 80 is occupied, find the process:
netstat -tlnp | grep 80
Kill the existing process listening on port 80
kill -9 <PID>
firewall-cmd --zone=public --add-port=80/tcp --permanent ### open port 80
firewall-cmd --reload ### reload the firewall
② Only one generated SSH key can exist at a time; it must be added by hand to this machine's and the peer's authorized_keys and known_hosts files under /root/.ssh/