
1. Overview

1.1 Current status

The 家聊 IM test environment is based on a Docker Swarm cluster and is now being migrated to a Kubernetes cluster. This document describes a deployment that spans the public internet and is intended for testing only.

1.2 Approach

Plan 1 (used in this document):

1. Create a virtual NIC bound to the public IP, register each node into the cluster with that public IP, and let the cluster communicate over the public IPs.

2. Bring the NIC up at boot via a startup script at /etc/rc.d/init.d/restart_net.sh, whose content is shown below:

shell
cat /etc/rc.d/init.d/restart_net.sh
systemctl restart network

3. Deploy with kubeadm, version v1.23.16.

4. Use the flannel network plugin with a modified interface binding (Calico's cross-network configuration is more involved and is not covered here).

Plan 2: connect the networks through an L2TP or IPsec tunnel.
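A minimal sketch of how the restart_net.sh script from Plan 1 (item 2) could be registered to run at boot. The source only gives the script path, so the rc.local hook below is an assumption:

shell
# assumption: use the classic rc.local mechanism to run the script at boot
chmod +x /etc/rc.d/init.d/restart_net.sh
chmod +x /etc/rc.d/rc.local
echo '/etc/rc.d/init.d/restart_net.sh' >> /etc/rc.d/rc.local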

Role          Public IP        Private IP      Hostname       OS
Master node   xxx.xxx.xxx.xxx  192.168.0.96    k8s-master01   CentOS 7.9
Worker node   xxx.xxx.xxx.xxx  192.168.1.214   k8s-node01     CentOS 7.9
Worker node   xxx.xxx.xxx.xxx  10.0.2.137      k8s-node02     CentOS 7.9

Item             Value
OS version       CentOS 7.9
Docker version   20.10.x
Pod CIDR         172.16.0.0/12
Service CIDR     10.96.0.0/16

2. Basic environment configuration

2.1 Configure hosts on all nodes: edit /etc/hosts

shell
cat /etc/hosts
::1    localhost    localhost.localdomain    localhost6    localhost6.localdomain6
127.0.0.1    localhost    localhost.localdomain    localhost4    localhost4.localdomain4
127.0.0.1    ecs-xxxx    ecs-xxxx



xxx.xxx.xxx.xxx  k8s-master01
xxx.xxx.xxx.xxx  k8s-node01

2.2 Configure the yum repositories (CentOS 7)

shell
curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
 yum install -y yum-utils device-mapper-persistent-data lvm2
 yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
 cat <<EOF >/etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
 sed -i -e '/mirrors.cloud.aliyuncs.com/d' -e '/mirrors.aliyuncs.com/d' /etc/yum.repos.d/CentOS-Base.repo

2.3 Install required tools

shell
yum install wget jq psmisc vim net-tools telnet yum-utils device-mapper-persistent-data lvm2 git -y

2.4 Disable firewalld, SELinux, dnsmasq, and swap on all nodes

shell
systemctl disable --now firewalld 
 systemctl disable --now dnsmasq
 systemctl disable --now NetworkManager



 setenforce 0
 sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/sysconfig/selinux
 sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config

2.5 Disable the swap partition

shell
swapoff -a && sysctl -w vm.swappiness=0
 sed -ri '/^[^#]*swap/s@^@#@' /etc/fstab

2.6 Install ntpdate

shell
rpm -ivh http://mirrors.wlnmp.com/centos/wlnmp-release-centos.noarch.rpm
 yum install ntpdate -y

2.7 Synchronize time on all nodes. Time sync configuration:

shell
ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
 echo 'Asia/Shanghai' >/etc/timezone
 ntpdate time2.aliyun.com
# add to crontab
[root@k8s-master01 ~]# crontab -e
*/5 * * * * /usr/sbin/ntpdate time2.aliyun.com
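An optional quick check that the clock is correct and the cron entry was saved (command names as used above):

shell
date
crontab -l | grep ntpdate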

2.8 Configure limits on all nodes:

shell
ulimit -SHn 65535



 vim /etc/security/limits.conf
# append the following at the end
* soft nofile 65536
* hard nofile 131072
* soft nproc 65535
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited
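After logging in again, the new limits can be confirmed as follows (expected values correspond to the soft limits above):

shell
ulimit -n    # expect 65536 (soft nofile)
ulimit -u    # expect 65535 (soft nproc)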

2.9 Passwordless SSH login

Set up passwordless SSH from Master01 to the other nodes. The configuration files and certificates generated during installation are all created on Master01, and cluster administration is also done from Master01; on Alibaba Cloud or AWS a separate kubectl server is needed. Key configuration:

shell
ssh-keygen -t rsa
 for i in k8s-master01 k8s-node01;do ssh-copy-id -i .ssh/id_rsa.pub $i;done
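To confirm passwordless login works before continuing, a quick loop over the same hosts (hostnames as configured in /etc/hosts above):

shell
for i in k8s-master01 k8s-node01; do ssh $i hostname; done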

2.10 Download all the installation source files (this step is skipped here)

shell
[root@k8s-master01 opt]# git clone https://gitee.com/dukuan/k8s-ha-install.git

3. Kernel configuration

CentOS 7 needs the kernel upgraded to 4.18+; this deployment upgrades to 4.19.

3.1 Download the kernel packages on the master01 node:

shell
cd /opt/kernel
 wget http://xxxx/elrepo/kernel/el7/x86_64/RPMS/kernel-ml-devel-4.19.12-1.el7.elrepo.x86_64.rpm
 wget http://xxxx/elrepo/kernel/el7/x86_64/RPMS/kernel-ml-4.19.12-1.el7.elrepo.x86_64.rpm

Download links:

https://minio.shandianyun.com.cn/collection/kernel-ml-4.19.12-1.el7.elrepo.x86_64.rpm

https://minio.shandianyun.com.cn/collection/kernel-ml-devel-4.19.12-1.el7.elrepo.x86_64.rpm

3.2 Copy the packages from master01 to the other nodes:

shell
for i in k8s-node01;do scp kernel-ml-4.19.12-1.el7.elrepo.x86_64.rpm kernel-ml-devel-4.19.12-1.el7.elrepo.x86_64.rpm $i:/root/ ; done

3.3 Install the kernel on all nodes

shell
cd /root && yum localinstall -y kernel-ml*

3.4 Change the kernel boot order on all nodes

shell
grub2-set-default  0 && grub2-mkconfig -o /etc/grub2.cfg



 grubby --args="user_namespace.enable=1" --update-kernel="$(grubby --default-kernel)"

3.5 Check that the default kernel is 4.19

shell
[root@k8s-master02 ~]# grubby --default-kernel
/boot/vmlinuz-4.19.12-1.el7.elrepo.x86_64

3.6 Reboot all nodes, then check that the running kernel is 4.19

shell
[root@k8s-master02 ~]# uname -a
Linux k8s-master02 4.19.12-1.el7.elrepo.x86_64 #1 SMP Fri Dec 21 11:06:36 EST 2018 x86_64 x86_64 x86_64 GNU/Linux

3.7 Install ipvsadm on all nodes:

shell
yum install ipvsadm ipset sysstat conntrack libseccomp -y

3.8 Configure the IPVS modules on all nodes

In kernel 4.19+ nf_conntrack_ipv4 has been renamed to nf_conntrack; on kernels older than 4.19 use nf_conntrack_ipv4 instead:

shell
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
shell
vim /etc/modules-load.d/ipvs.conf
# add the following
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip

Then run:

shell
systemctl enable --now systemd-modules-load.service

3.9 Enable kernel parameters required by a Kubernetes cluster; configure on all nodes:

shell
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
net.ipv4.conf.all.route_localnet = 1
vm.overcommit_memory = 1
vm.panic_on_oom = 0
fs.inotify.max_user_watches = 89100
fs.file-max = 52706963
fs.nr_open = 52706963
net.netfilter.nf_conntrack_max = 2310720

net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.ip_conntrack_max = 65536
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF
shell
sysctl --system
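A spot check of a few of the keys set above; note that the net.bridge.* keys require the br_netfilter module, so load it first if the query fails:

shell
modprobe br_netfilter
sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables net.netfilter.nf_conntrack_max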

3.10 After the kernel configuration is done on all nodes, reboot the servers and make sure the modules are still loaded after the reboot

shell
reboot
 lsmod | grep --color=auto -e ip_vs -e nf_conntrack

4. Configure the virtual NIC

4.1 Create the virtual NIC (required on all nodes)

shell
cat > /etc/sysconfig/network-scripts/ifcfg-eth0:1 <<EOF
BOOTPROTO=static
DEVICE=eth0:1
IPADDR=<your public IP>
PREFIX=32
TYPE=Ethernet
USERCTL=no
ONBOOT=yes
EOF

4.2 Restart the network

shell
systemctl restart network
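After the restart, confirm the public IP is bound as a secondary address (assuming the primary interface is eth0, as in the ifcfg file above):

shell
ip addr show eth0    # the public IP should be listed with the label eth0:1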

5. Install Docker

5.1 Install docker-ce 20.10 on all nodes

shell
yum install docker-ce-20.10.* docker-ce-cli-20.10.* -y

5.2 Newer kubelet versions recommend systemd, so change Docker's cgroup driver to systemd as well

shell
mkdir -p /etc/docker
 cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts":["native.cgroupdriver=systemd"]
}
EOF

5.3 Enable Docker to start on boot on all nodes

shell
systemctl daemon-reload && systemctl enable --now docker
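A quick confirmation that Docker picked up the systemd cgroup driver from daemon.json:

shell
docker info | grep -i "cgroup driver"    # expect: Cgroup Driver: systemd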

6. Install Kubernetes components

6.1 First, on the Master01 node, check what the latest Kubernetes version is

shell
yum list kubeadm.x86_64 --showduplicates | sort -r

6.2 Install the latest 1.23 kubeadm, kubelet, and kubectl on all nodes:

shell
yum install kubeadm-1.23.16 kubelet-1.23.16 kubectl-1.23.16 -y

6.3 Enable kubelet to start on boot on all nodes

shell
systemctl daemon-reload
 systemctl enable --now kubelet
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.

7. Cluster initialization

7.1 Initialize the control-plane node (do not run this on worker nodes)

shell
kubeadm init \
--apiserver-advertise-address=<public IP of k8s-master01> \
--image-repository registry.cn-hangzhou.aliyuncs.com/google_containers \
--kubernetes-version v1.23.16 \
--control-plane-endpoint=<public IP of k8s-master01> \
--service-cidr=10.96.0.0/16 \
--pod-network-cidr=172.16.0.0/12 \
--v=5

Check the installed versions: kubectl version, kubelet --version, kubeadm version.

Check the node status:

shell
kubectl get node
NAME           STATUS     ROLES                  AGE   VERSION
k8s-master01   NotReady   control-plane,master   12m   v1.23.16

View cluster logs:

shell
journalctl -f -u kubelet

Re-initialize the cluster (use with caution):

shell
kubeadm reset

If you run into a Docker/containerd cgroup-driver error, you can run:

shell
rm /etc/containerd/config.toml
systemctl restart containerd
docker info | grep -i "Cgroup Driver"
vim /etc/docker/daemon.json    # make sure it contains the line below
"exec-opts": ["native.cgroupdriver=systemd"]

After the master initializes successfully, run the following to configure kubectl access (exporting admin.conf as KUBECONFIG is convenient, but use it with caution in production). Shell command completion can be added as shown in the sketch after the block.

shell
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
export KUBECONFIG=/etc/kubernetes/admin.conf
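A minimal sketch of the shell completion mentioned above, assuming bash and the bash-completion package are acceptable on the host:

shell
yum install -y bash-completion
echo 'source <(kubectl completion bash)' >> ~/.bashrc
source ~/.bashrc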

7.2 Join the worker nodes to the cluster

By default, the token expires after 24 hours. If you need to join a node after the current token has expired, create a new one by running the following on the control-plane node:

shell
kubeadm token create --print-join-command


# sample output:
kubeadm join xx.xx.xx.xx:6443 --token 1nekmi.bbeh7z42ej149yde     --discovery-token-ca-cert-hash sha256:e979b2ecdc64fa7209192b59d4c353f0c2122c9f5033024e927631d266dc1bd2

After all nodes have joined, check the cluster status:

shell
kubectl get node
NAME           STATUS   ROLES                  AGE   VERSION
k8s-master01   Ready    control-plane,master   12h   v1.23.16
k8s-node01     Ready    <none>                 12h   v1.23.16

7.3 Install the CNI network plugin

shell
curl -OL https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

Change the following configuration to match the pod network CIDR:

shell
net-conf.json: |
    {
      "Network": "172.16.0.0/12",
      "Backend": {
        "Type": "vxlan"
      }
    }

Sample yaml file after the changes:

yaml
---
kind: Namespace
apiVersion: v1
metadata:
  name: kube-flannel
  labels:
    pod-security.kubernetes.io/enforce: privileged
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
- apiGroups:
  - "networking.k8s.io"
  resources:
  - clustercidrs
  verbs:
  - list
  - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-flannel
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-flannel
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-flannel
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "172.16.0.0/12",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-flannel
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni-plugin
        image: docker.io/flannel/flannel-cni-plugin:v1.1.2
        #image: docker.io/rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.2
        command:
        - cp
        args:
        - -f
        - /flannel
        - /opt/cni/bin/flannel
        volumeMounts:
        - name: cni-plugin
          mountPath: /opt/cni/bin
      - name: install-cni
        image: docker.io/flannel/flannel:v0.21.2
        #image: docker.io/rancher/mirrored-flannelcni-flannel:v0.21.2
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: docker.io/flannel/flannel:v0.21.2
        #image: docker.io/rancher/mirrored-flannelcni-flannel:v0.21.2
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        - --public-ip=$(PUBLIC_IP)    # added: advertise the node's public IP
        - --iface=eth0                # added: bind flannel to this interface
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: PUBLIC_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
              
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: EVENT_QUEUE_DEPTH
          value: "5000"
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
        - name: xtables-lock
          mountPath: /run/xtables.lock
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni-plugin
        hostPath:
          path: /opt/cni/bin
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
      - name: xtables-lock
        hostPath:
          path: /run/xtables.lock
          type: FileOrCreate

Check the pod CIDR value configured in the local cluster:

shell
cat /etc/kubernetes/manifests/kube-controller-manager.yaml  | grep cluster-cidr

Create the flannel resources:

shell
kubectl apply -f kube-flannel.yml
 # check pods
 kubectl get po -A
NAMESPACE      NAME                                   READY   STATUS    RESTARTS   AGE
kube-flannel   kube-flannel-ds-7h6gx                  1/1     Running   0          12m
kube-flannel   kube-flannel-ds-mf8zc                  1/1     Running   0          12m
kube-flannel   kube-flannel-ds-xl7qh                  1/1     Running   0          12m
kube-system    coredns-54d67798b7-lgc5v               1/1     Running   0          53m
kube-system    coredns-54d67798b7-z669h               1/1     Running   0          53m
# check nodes
kubectl get node
NAME           STATUS   ROLES                  AGE   VERSION
k8s-master01   Ready    control-plane,master   54m   v1.20.9
k8s-node01     Ready    <none>                 48m   v1.20.9
k8s-node02     Ready    <none>                 48m   v1.20.9

8. Deploy an Nginx service to test the environment

8.1 Test yaml file

yaml
apiVersion: v1
kind: ReplicationController
metadata:
  name: nginx-test
  labels:
    name: nginx-test
spec:
  replicas: 1
  selector:
    name: nginx-test
  template:
    metadata:
      labels:
       name: nginx-test
    spec:
      containers:
      - name: nginx-test
        image: docker.io/nginx
---


apiVersion: v1
kind: Service
metadata:
  name: nginx-test
  labels:
   name: nginx-test
spec:
  type: NodePort
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
    name: http
    nodePort: 30088
  selector:
    name: nginx-test

8.2 Create the resources

shell
kubectl apply -f nginx

8.3 Test

shell
[root@k8s-master01 softwares]# kubectl get po -owide
NAME                                            READY   STATUS    RESTARTS      AGE   IP           NODE         NOMINATED NODE   READINESS GATES
nginx-deployment-ingresstest-74d589986c-9n2c6   1/1     Running   1 (11h ago)   11h   172.16.1.8   k8s-node01   <none>           <none>
nginx-test-jsrdv                                1/1     Running   1 (11h ago)   12h   172.16.1.9   k8s-node01   <none>           <none>


[root@k8s-master01 softwares]# curl 172.16.1.9
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
html { color-scheme: light dark; }
body { width: 35em; margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif; }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>


<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>


<p><em>Thank you for using nginx.</em></p>
</body>
</html>



[root@k8s-master01 softwares]# kubectl get svc -owide
NAME            TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE   SELECTOR
kubernetes      ClusterIP   10.96.0.1      <none>        443/TCP        12h   <none>
nginx-service   ClusterIP   10.96.63.228   <none>        80/TCP         11h   app=nginx
nginx-test      NodePort    10.96.41.87    <none>        80:30088/TCP   12h   name=nginx-test


[root@k8s-master01 softwares]# curl 10.96.41.87
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
html { color-scheme: light dark; }
body { width: 35em; margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif; }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>


<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>


<p><em>Thank you for using nginx.</em></p>
</body>
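The Service above also exposes NodePort 30088, which the output does not exercise; a quick external check, assuming the node's public IP is reachable and the security group/firewall allows the port:

shell
curl http://xxx.xxx.xxx.xxx:30088    # any node's public IP; should return the same nginx welcome page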

8.4 Node label configuration (optional)

shell
# add node labels
kubectl label nodes k8s-master01 app=middleware
kubectl label nodes k8s-node01 app=jlimwork
kubectl label nodes k8s-node02 app=jlimwork01
# remove node labels
kubectl label nodes k8s-master01 app-
kubectl label nodes k8s-node01 app-
# show node/pod labels
kubectl get node --show-labels
kubectl get pod -o wide --all-namespaces --show-labels
# modify a label
kubectl label nodes k8s-master01 app=jlimdbxiugai --overwrite

9. Install the dashboard

9.1 Install the dashboard

shell
# apply the yaml files
wget https://minio.shandianyun.com.cn:8543/collection/k8s/yaml/dashboard.yaml
wget https://minio.shandianyun.com.cn:8543/collection/k8s/yaml/dashboard-user.yaml
kubectl apply -f dashboard.yaml
kubectl  apply -f dashboard-user.yaml
# expose via NodePort
kubectl patch svc kubernetes-dashboard -n kubernetes-dashboard -p '{"spec":{"type":"NodePort","ports":[{"port":443,"targetPort":8443,"nodePort":30001}]}}'
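Before logging in, a quick check that the NodePort patch took effect (port 30001 as patched above):

shell
kubectl get svc -n kubernetes-dashboard kubernetes-dashboard
# then open https://<node public IP>:30001 in a browser and sign in with the token from 9.2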

9.2 Configure the login token and kubeconfig file

shell
kubectl get pod -n kubernetes-dashboard
# get the login token
#kubectl -n kubernetes-dashboard describe secret $(kubectl -n kubernetes-dashboard get secret | grep admin-user | awk '{print $1}')
kubectl describe -n kubernetes-dashboard secrets admin-user
# log in with a kubeconfig file
# find the admin secret
kubectl -n kubernetes-dashboard get secrets
# extract the token
DASH_TOKEN=$(kubectl -n kubernetes-dashboard get secrets admin-user-token-ltrhx -o jsonpath={.data.token} | base64 -d)
# set a cluster entry in the kubeconfig file
kubectl config set-cluster kubernetes --server=https://192.168.255.100:30443 --kubeconfig=/usr/local/src/dashbord-admin.conf
# set a user entry in the kubeconfig file
kubectl config set-credentials kubernetes-dashboard --token=$DASH_TOKEN --kubeconfig=/usr/local/src/dashbord-admin.conf
# set a context entry in the kubeconfig file
kubectl config set-context kubernetes-dashboard@kubernetes --cluster=kubernetes --user=kubernetes-dashboard --kubeconfig=/usr/local/src/dashbord-admin.conf
# set the current context in the kubeconfig file
kubectl config use-context kubernetes-dashboard@kubernetes --kubeconfig=/usr/local/src/dashbord-admin.conf
# download the kubeconfig to the local machine
sz /usr/local/src/dashbord-admin.conf
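A simple sanity check of the generated kubeconfig before handing it out (assuming the admin-user service account was granted cluster-admin by dashboard-user.yaml and the server address in the file is reachable):

shell
kubectl --kubeconfig=/usr/local/src/dashbord-admin.conf get nodes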

This is a personal project shown for demonstration only; none of the content involved is sold in any form.