The following environment is required.
Overview: deploy Blackbox (a prober that checks service and network reachability) together with Grafana and Prometheus (the monitoring and visualization stack) on a K8s cluster, so that network connectivity can be visualized, alerted on, and analyzed.
This article is assembled from two posts on 若海's blog: 【Kubernetes 集群上安装 Blackbox 监控网站状态】 and 【Kubernetes 集群上安装 Grafana 和 Prometheus】.
Make sure the master node and the worker nodes all have Docker installed (ideally the same version).
# Install Docker with the convenience script (skip if already installed)
curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun
# Start Docker and enable it at boot
systemctl start docker && systemctl enable docker
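If you want to confirm Docker is healthy before continuing, a quick generic check:
# Print the server version and make sure the service starts at boot
docker version --format '{{.Server.Version}}'
systemctl is-enabled docker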
apt update
apt install -y wireguard
echo "net.ipv4.ip_forward = 1" >/etc/sysctl.d/ip_forward.conf
sysctl -p /etc/sysctl.d/ip_forward.conf
# Save the token below somewhere safe; it can be any string
export SERVER_TOKEN=r83nui54eg8wihyiteshuo3o43gbf7u9er63o43gbf7uitujg8wihyitr6
export PUBLIC_IP=$(curl -Ls http://metadata.tencentyun.com/latest/meta-data/public-ipv4)
export PRIVATE_IP=$(curl -Ls http://metadata.tencentyun.com/latest/meta-data/local-ipv4)
export INSTALL_K3S_SKIP_DOWNLOAD=true
export DOWNLOAD_K3S_BIN_URL=https://github.com/k3s-io/k3s/releases/download/v1.28.2%2Bk3s1/k3s
if [ "$(curl -Ls http://ipip.rehi.org/country_code)" == "CN" ]; then
DOWNLOAD_K3S_BIN_URL=https://ghproxy.com/${DOWNLOAD_K3S_BIN_URL}
fi
curl -Lo /usr/local/bin/k3s $DOWNLOAD_K3S_BIN_URL
chmod a+x /usr/local/bin/k3s
curl -Ls https://get.k3s.io | sh -s - server \
--cluster-init \
--token $SERVER_TOKEN \
--node-ip $PRIVATE_IP \
--node-external-ip $PUBLIC_IP \
--advertise-address $PRIVATE_IP \
--service-node-port-range 5432-9876 \
--flannel-backend wireguard-native \
--flannel-external-ip
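If the install succeeded, the node should report Ready within a minute or so. A quick sanity check (k3s bundles kubectl; the kubeconfig path below is the k3s default):
# Confirm the control-plane node is Ready
kubectl get nodes -o wide
# If kubectl cannot find a cluster, point it at the k3s kubeconfig
export KUBECONFIG=/etc/rancher/k3s/k3s.yaml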
# Worker (agent) node setup
# Install Docker with the convenience script (skip if already installed)
curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun
# Start Docker and enable it at boot
systemctl start docker && systemctl enable docker
apt update
apt install -y wireguard
echo "net.ipv4.ip_forward = 1" >/etc/sysctl.d/ip_forward.conf
sysctl -p /etc/sysctl.d/ip_forward.conf
export SERVER_IP=43.129.195.33 # set this to your master node's address
export SERVER_TOKEN=r83nui54eg8wihyiteshuo3o43gbf7u9er63o43gbf7uitujg8wihyitr6
export PUBLIC_IP=$(curl -Ls http://metadata.tencentyun.com/latest/meta-data/public-ipv4)
export PRIVATE_IP=$(curl -Ls http://metadata.tencentyun.com/latest/meta-data/local-ipv4)
export INSTALL_K3S_SKIP_DOWNLOAD=true
export DOWNLOAD_K3S_BIN_URL=https://github.com/k3s-io/k3s/releases/download/v1.28.2%2Bk3s1/k3s
if [ "$(curl -Ls http://ipip.rehi.org/country_code)" == "CN" ]; then
DOWNLOAD_K3S_BIN_URL=https://ghproxy.com/${DOWNLOAD_K3S_BIN_URL}
fi
curl -Lo /usr/local/bin/k3s $DOWNLOAD_K3S_BIN_URL
chmod a+x /usr/local/bin/k3s
curl -Ls https://get.k3s.io | sh -s - agent \
--server https://$SERVER_IP:6443 \
--token $SERVER_TOKEN \
--node-ip $PRIVATE_IP \
--node-external-ip $PUBLIC_IP
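Back on the master you can check that the agent joined (the node name defaults to the agent's hostname):
# Run on the master; the new worker should show up as Ready
kubectl get nodes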
# Pull the image
docker pull rehiy/blackbox
# Start the container
docker run -d \
--name blackbox \
--restart always \
--publish 9115:9115 \
--env "NODE_NAME=guangzhou-taozi" \
--env "NODE_OWNER=Taozi" \
--env "NODE_REGION=广州" \
--env "NODE_ISP=TencentCloud" \
--env "NODE_BANNER=From Taozii-www.xiongan.host" \
rehiy/blackbox
# Registration starts; follow the log to confirm it succeeds
docker logs -f blackbox
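The container publishes blackbox_exporter on port 9115 (see --publish above); a quick local check that the exporter responds, using the exporter's standard metrics endpoint:
# Should print Prometheus-format metrics
curl -s http://127.0.0.1:9115/metrics | head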
On the master node, create a directory (any name) and create two files inside it: grafpro.yaml and grafpro.sh. The contents of grafpro.yaml:
kind: Deployment
apiVersion: apps/v1
metadata:
  name: &name grafpro
  labels:
    app: *name
spec:
  selector:
    matchLabels:
      app: *name
  template:
    metadata:
      labels:
        app: *name
    spec:
      initContainers:
        - name: busybox
          image: busybox
          command:
            - sh
            - -c
            - |
              if [ ! -f /etc/prometheus/prometheus.yml ]; then
              cat <<EOF >/etc/prometheus/prometheus.yml
              global:
                scrape_timeout: 25s
                scrape_interval: 1m
                evaluation_interval: 1m
              scrape_configs:
                - job_name: prometheus
                  static_configs:
                    - targets:
                        - 127.0.0.1:9090
              EOF
              fi
          volumeMounts:
            - name: *name
              subPath: etc
              mountPath: /etc/prometheus
      containers:
        - name: grafana
          image: grafana/grafana
          securityContext:
            runAsUser: 0
          ports:
            - containerPort: 3000
          volumeMounts:
            - name: *name
              subPath: grafana
              mountPath: /var/lib/grafana
        - name: prometheus
          image: prom/prometheus
          securityContext:
            runAsUser: 0
          ports:
            - containerPort: 9090
          volumeMounts:
            - name: *name
              subPath: etc
              mountPath: /etc/prometheus
            - name: *name
              subPath: prometheus
              mountPath: /prometheus
      volumes:
        - name: *name
          hostPath:
            path: /srv/grafpro
            type: DirectoryOrCreate
---
kind: Service
apiVersion: v1
metadata:
  name: &name grafpro
  labels:
    app: *name
spec:
  selector:
    app: *name
  ports:
    - name: grafana
      port: 3000
      targetPort: 3000
    - name: prometheus
      port: 9090
      targetPort: 9090
---
kind: Ingress
apiVersion: networking.k8s.io/v1
metadata:
  name: &name grafpro
  annotations:
    traefik.ingress.kubernetes.io/router.entrypoints: web,websecure
spec:
  rules:
    - host: grafana.example.org
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: *name
                port:
                  name: grafana
    - host: prometheus.example.org
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: *name
                port:
                  name: prometheus
  tls:
    - secretName: default
Warning: change the storage path and the access domains to your own. The contents of grafpro.sh:
# Storage path
export GRAFPRO_STORAGE=${GRAFPRO_STORAGE:-"/srv/grafpro"}
# Access domains
export GRAFANA_DOMAIN=${GRAFANA_DOMAIN:-"grafana.example.org"}
export PROMETHEUS_DOMAIN=${PROMETHEUS_DOMAIN:-"prometheus.example.org"}
# Substitute the parameters and deploy
cat grafpro.yaml \
| sed "s#/srv/grafpro#$GRAFPRO_STORAGE#g" \
| sed "s#grafana.example.org#$GRAFANA_DOMAIN#g" \
| sed "s#prometheus.example.org#$PROMETHEUS_DOMAIN#g" \
| kubectl apply -f -
chmod +x grafpro.sh
./grafpro.sh
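To confirm the stack came up, query the objects the manifest created (names from grafpro.yaml above):
kubectl get deploy,svc,ingress grafpro
kubectl get pod -l app=grafpro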
Note: ports 9115 and 9090 must be reachable.
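How you open them depends on the environment; on the Debian/Ubuntu hosts used above, a ufw sketch (skip this if you manage ports through cloud security groups instead):
# Allow the blackbox exporter and Prometheus ports
ufw allow 9115/tcp
ufw allow 9090/tcp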
Open http://grafana.example.org in a browser. The initial username and password are both admin; on first login you are prompted to change the password and are then taken to the console.
Open http://grafana.example.org/connections/datasources/, pick the first entry (Prometheus), set the URL to http://127.0.0.1:9090, and save. (127.0.0.1 works because Prometheus runs in the same pod as Grafana.)
Then select the newly created Prometheus data source when importing a dashboard.
Open http://prometheus.example.org to inspect Prometheus itself.
# Back on the master node, in /srv/grafpro/etc
Edit the Prometheus config: back up the existing file, then create a new one.
mv prometheus.yml prometheus00.yml
# New prometheus.yml contents follow (if you deployed the blackbox exporter under a different workload name, adjust the configuration below to match)
global:
  scrape_timeout: 15s
  scrape_interval: 1m
  evaluation_interval: 1m
scrape_configs:
  # prometheus itself
  - job_name: prometheus
    static_configs:
      - targets:
          - 127.0.0.1:9090
  # blackbox_all
  - job_name: blackbox_all
    static_configs:
      - targets:
          - blackbox-gz:9115
        labels:
          region: '广州,腾讯云'
  # http_status_gz
  - job_name: http_status_gz
    metrics_path: /probe
    params:
      module: [http_2xx]  # probe with an HTTP GET and expect a 2xx response
    static_configs:
      - targets:
          - https://www.example.com
        labels:
          project: 测试1
          desc: 测试网站描述1
      - targets:
          - https://www.example.org
        labels:
          project: 测试2
          desc: 测试网站描述2
    basic_auth:
      username: ******
      password: ******
    relabel_configs:
      - target_label: region
        replacement: '广州,腾讯云'
      - source_labels: [__address__]
        target_label: __param_target
      - source_labels: [__param_target]
        target_label: instance
      - target_label: __address__
        replacement: blackbox-gz:9115
Then restart the workload so it picks up the new config. First list the pods:
kubectl get pod
Then delete the grafpro pod (the Deployment recreates it automatically) and wait a few minutes:
kubectl delete pod <grafpro-pod-name>
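Alternatively, one command restarts the workload without hunting for the pod name (the Deployment is named grafpro in the manifest above):
kubectl rollout restart deployment grafpro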
Download the attached JSON and import it into Grafana as a dashboard.
After the import you can see that monitoring is running and the various metrics are displayed.
On the Windows Server 2003 machine acting as the VPN server, add a second network adapter connected to the internal subnet.
Open Routing and Remote Access.
Choose to configure a VPN server.
Select the external (Internet-facing) network adapter.
IP address assignment → Automatic.
Name and address translation services → Enable basic name and address services.
Managing multiple remote access servers → No.
Confirm when the wizard finishes.
On the XP client, add a new network connection:
Network connection type → Virtual private network connection.
Connection name → anything you like.
VPN server → the IP address of the Windows 2003 server.
After the wizard finishes, a login window pops up; at this point create a new user and password on the Windows 2003 VPN server.
Grant the user dial-in permission.
Once the XP client has logged in and connected,
run ipconfig in cmd: the client has obtained a new address consistent with the internal network.
If the address you see is randomly assigned, simply change the VPN server configuration to use a static address pool.
Test again and you will see an address from the custom static pool.
Objective: master the method of interconnecting containers across hosts with Docker.
Requirements: the lab hosts can reach the Internet, Docker is installed correctly, and the firewall and SELinux are disabled. Host details are listed in Table 1-1.
Table 1-1 Host configuration
| Hostname | IP address/netmask | Container name |
| --- | --- | --- |
| node01 | 192.168.123.88/24 | Centos |
| node02 | 192.168.123.99/24 | Centos |
node01 needs two services installed (etcd and flannel); node02 needs one (flannel).
node01:
Note: 1. ETCD_DATA_DIR is the path where etcd stores its data; 2. ETCD_LISTEN_CLIENT_URLS is the client listen address; 3. ETCD_NAME is the node name; 4. ETCD_ADVERTISE_CLIENT_URLS is the client URL advertised to the other etcd servers.
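As a sketch, a minimal /etc/etcd/etcd.conf for node01 consistent with the note above (the etcd name and data path are assumptions; the IP comes from Table 1-1):
# /etc/etcd/etcd.conf on node01
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"                                   # data directory
ETCD_LISTEN_CLIENT_URLS="http://192.168.123.88:2379,http://127.0.0.1:2379"   # client listen addresses
ETCD_NAME="etcd01"                                                           # node name
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.123.88:2379"                      # advertised client URL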
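The flannel commands themselves are not spelled out in the text; a typical CentOS 7 flow, offered only as an assumed sketch (the /atomic.io/network prefix is the flanneld package default), looks like this:
# On node01: publish the overlay network config into etcd (etcd v2 API)
etcdctl --endpoints http://192.168.123.88:2379 set /atomic.io/network/config '{"Network":"172.20.0.0/16","Backend":{"Type":"vxlan"}}'
# On both nodes, in /etc/sysconfig/flanneld: point flanneld at etcd
FLANNEL_ETCD_ENDPOINTS="http://192.168.123.88:2379"
FLANNEL_ETCD_PREFIX="/atomic.io/network"
# Restart flanneld, then docker, so docker picks up the flannel subnet
systemctl enable --now flanneld && systemctl restart docker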
Note: to check a container's IP:
docker inspect 容器名/id | grep IPAddress
yum install -y pssh
Both virtual machines need their hosts file edited,
adding the IPs and hostnames of node01 and node02 (a sketch follows).
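Assumed entries (hostnames taken from the pcs commands later in this section; IPs from Table 1-1):
# /etc/hosts on both nodes
192.168.123.88  node01-tz
192.168.123.99  node02-tz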
Generate a key pair and copy it so that node01 and node02 can be logged into without a password:
ssh-keygen -t rsa
ssh-copy-id -i /root/.ssh/id_rsa.pub root@node01-tz
ssh-copy-id -i /root/.ssh/id_rsa.pub root@node02-tz
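pssh below reads its targets from a plain host list, one host per line; a minimal host-list.txt (assumed to sit in the current directory):
# host-list.txt
node01-tz
node02-tz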
pssh -h host-list.txt -i 'yum install pacemaker pcs -y'
pssh -h host-list.txt -i 'systemctl enable --now pcsd'
Set the password for the cluster account:
pssh -h host-list.txt -i 'echo 123456 | passwd --stdin hacluster'
Allow the high-availability service through the firewall and reload it:
pssh -h host-list.txt -i 'firewall-cmd --add-service=high-availability --permanent'
pssh -h host-list.txt -i 'firewall-cmd --reload'
pcs cluster auth node01-tz node02-tz
pcs cluster setup --name tz-cluster node01-tz node02-tz
pcs cluster start --all
pcs cluster enable --all
pcs status cluster
Install httpd on both nodes, but do not start it (the cluster will manage it):
pssh -h host-list.txt -i 'yum install httpd -y'
vim /etc/httpd/conf.d/server_status.conf
ExtendedStatus On
<Location /server-status>
SetHandler server-status
Require local
</Location>
Also write a test page to /var/www/html/index.html on each node, as sketched below.
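A quick way to create a page that identifies each node (so failover is visible from the client):
pssh -h host-list.txt -i 'echo "Test page on $(hostname)" > /var/www/html/index.html'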
[root@node01-tz ~]# pcs property set stonith-enabled=false <== disable STONITH fencing
[root@node01-tz ~]# pcs property set no-quorum-policy=ignore <== ignore loss of quorum (two-node cluster)
[root@node01-tz ~]# pcs property set default-resource-stickiness="INFINITY" <== keep resources on the node where they are running
[root@node01-tz ~]# pcs property list <== show the settings
pcs resource create VIP ocf:heartbeat:IPaddr2 ip=192.168.123.111 cidr_netmask=24 op monitor interval=30s <== define the VIP
pcs status resources <== show resource status
Add the httpd resource:
pcs resource create Web123-Cluster ocf:heartbeat:apache configfile=/etc/httpd/conf/httpd.conf statusurl="http://127.0.0.1/server-status" op monitor interval=1min
[root@node01-tz ~]# pcs constraint colocation add Web123-Cluster with VIP INFINITY
[root@node01-tz ~]# pcs constraint order VIP then Web123-Cluster <== start order: VIP first, then Web123-Cluster
pcs constraint <== show the constraints
Stop the node01 node and test from the client again:
pcs cluster stop node01-tz
The resources fail over to node02 automatically.
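From a client on the same subnet, the site should still answer through the VIP defined above:
curl http://192.168.123.111/
To use the pcsd web UI, open port 2224 in the firewall: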
firewall-cmd --add-port=2224/tcp --permanent
firewall-cmd --reload
Open https://192.168.123.111:2224 in a browser (the pcsd web UI).
① Finding what occupies port 80:
netstat -tlnp | grep 80
Kill the service already bound to port 80:
kill -9 <PID>
firewall-cmd --zone=public --add-port=80/tcp --permanent # open port 80
firewall-cmd --reload # reload the firewall
② Only one generated SSH key pair can exist at a time; its public key must be added manually to the authorized_keys and known_hosts files under /root/.ssh/ on both the local machine and the peer.