Deploying a Kubernetes Cluster on CentOS

2018/03/23 Kubernetes

1. Environment Planning

Software     Version
OS           CentOS 7.4.1708
Kubernetes   1.9
Docker       17.12-ce
Etcd         3.2.12

1.1 Roles

Role    IP            Components
master  10.100.4.195  kube-apiserver, kube-controller-manager, kube-scheduler, etcd
node01  10.100.4.197  kubelet, kube-proxy, docker, flannel, etcd
node02  10.100.4.198  kubelet, kube-proxy, docker, flannel, etcd

1.2 Hostname Plan

Hostname    IP address
k8s-master 10.100.4.195
k8s-node01 10.100.4.197
k8s-node02 10.100.4.198

1.3 Prerequisites

  1. Disable SELinux
  2. Configure the hosts so they can reach each other by hostname
  3. Configure cluster time synchronization (a sketch of all three steps is shown below)
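
A minimal sketch of these three steps, to be run on every host; chrony for time sync is one reasonable choice, adjust to your environment:

# Disable SELinux immediately and across reboots
setenforce 0
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config

# Let the hosts resolve each other by name
cat >> /etc/hosts <<EOF
10.100.4.195 k8s-master
10.100.4.197 k8s-node01
10.100.4.198 k8s-node02
EOF

# Time synchronization (chrony is the CentOS 7 default)
yum install -y chrony
systemctl enable chronyd
systemctl start chronyd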

2. Install Docker

2.1 Install dependency packages

yum install -y yum-utils \
  device-mapper-persistent-data \
  lvm2

2.2 Add the official repo

sudo yum-config-manager \
    --add-repo \
    https://download.docker.com/linux/centos/docker-ce.repo

2.3 Install Docker

sudo yum -y install docker-ce
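
The environment table in section 1 pins Docker at 17.12-ce, while the command above installs the latest docker-ce. To match the table, the version can be pinned; the exact package version string below is an assumption, copy the real one from the list output:

# Show the available versions, newest first
yum list docker-ce --showduplicates | sort -r
# Install a specific version (version string taken from the list output)
sudo yum -y install docker-ce-17.12.1.ce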

2.4 Configure a registry mirror and a private registry

mkdir -p /etc/docker    # ensure the directory exists before writing the file
cat << EOF > /etc/docker/daemon.json
{
  "registry-mirrors": ["https://registry.docker-cn.com"],
  "insecure-registries":["10.100.4.214:1180"]
}
EOF

2.5 Start Docker

systemctl enable docker.service
systemctl start docker.service
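
As a quick sanity check, docker info should now show the mirror and the insecure registry configured above:

docker info | grep -A 2 -E "Registry Mirrors|Insecure Registries"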

3. Self-Signed TLS Certificates

Create the certificates on k8s-master.

Component       Certificates used
etcd ca.pem, server.pem, server-key.pem
kube-apiserver ca.pem, server.pem, server-key.pem
kubelet ca.pem, ca-key.pem
kube-proxy ca.pem, kube-proxy.pem, kube-proxy-key.pem
kubectl ca.pem, admin.pem, admin-key.pem

3.1 Install the certificate generation tool cfssl

# cd /usr/local/src/
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
chmod +x cfssl_linux-amd64
mv cfssl_linux-amd64 /usr/local/bin/cfssl

wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
chmod +x cfssljson_linux-amd64
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson

wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
chmod +x cfssl-certinfo_linux-amd64
mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
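
Verify the tool is on the PATH:

cfssl version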

3.2 Create the CA config file

root@k8s-master:~ # mkdir ssl
root@k8s-master:~ # cd ssl
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
         "expiry": "87600h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF

3.3 Create the CA certificate signing request

cat > ca-csr.json <<EOF
{
    "CN": "kubernetes",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF

3.4 Generate the CA certificate and private key

root@k8s-master:~/ssl # cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
2018/02/28 21:59:57 [INFO] generating a new CA key and certificate from CSR
2018/02/28 21:59:57 [INFO] generate received request
2018/02/28 21:59:57 [INFO] received CSR
2018/02/28 21:59:57 [INFO] generating key: rsa-2048
2018/02/28 21:59:57 [INFO] encoded CSR
2018/02/28 21:59:57 [INFO] signed certificate with serial number 714256023081772983620804741878806952429252686236

This generates ca.pem, ca-key.pem, and ca.csr in the current directory.

3.5 Create the server certificate

cat > server-csr.json <<EOF
{
    "CN": "kubernetes",
    "hosts": [
      "127.0.0.1",
      "10.100.4.195",
      "10.100.4.197",
      "10.100.4.198",
      "10.10.10.1",
      "kubernetes.default",
      "kubernetes.default.svc",
      "kubernetes.default.svc.cluster",
      "kubernetes.default.svc.cluster.local"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF

3.6 Generate the server certificate and private key

root@k8s-master:~/ssl # cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server
2018/02/28 22:09:18 [INFO] generate received request
2018/02/28 22:09:18 [INFO] received CSR
2018/02/28 22:09:18 [INFO] generating key: rsa-2048
2018/02/28 22:09:18 [INFO] encoded CSR
2018/02/28 22:09:18 [INFO] signed certificate with serial number 133916481400738459646669406581156212974360962358
2018/02/28 22:09:18 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").

This generates server.pem, server-key.pem, and server.csr. The "hosts" warning from cfssl can be ignored for certificates used inside the cluster.

3.7 Create the admin certificate

cat > admin-csr.json <<EOF
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF

Generate the admin certificate and private key:

root@k8s-master:~/ssl # cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
2018/02/28 22:13:51 [INFO] generate received request
2018/02/28 22:13:51 [INFO] received CSR
2018/02/28 22:13:51 [INFO] generating key: rsa-2048
2018/02/28 22:13:52 [INFO] encoded CSR
2018/02/28 22:13:52 [INFO] signed certificate with serial number 24078040860929152951931170145900083173027346974
2018/02/28 22:13:52 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").

This generates admin.pem and admin-key.pem.

3.8 Create the kube-proxy certificate

cat > kube-proxy-csr.json <<EOF
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

Generate the kube-proxy client certificate and private key:

root@k8s-master:~/ssl # cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
2018/02/28 22:17:04 [INFO] generate received request
2018/02/28 22:17:04 [INFO] received CSR
2018/02/28 22:17:04 [INFO] generating key: rsa-2048
2018/02/28 22:17:04 [INFO] encoded CSR
2018/02/28 22:17:04 [INFO] signed certificate with serial number 618140961241638612222272776333101371052850426497
2018/02/28 22:17:04 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").

This generates kube-proxy.pem and kube-proxy-key.pem.

3.9 Remove the unneeded certificate files

root@k8s-master:~/ssl # ls | grep -v pem | xargs -i rm {}
root@k8s-master:~/ssl # ll
total 32
-rw------- 1 root root 1679 Feb 28 22:13 admin-key.pem
-rw-r--r-- 1 root root 1399 Feb 28 22:13 admin.pem
-rw------- 1 root root 1675 Feb 28 21:59 ca-key.pem
-rw-r--r-- 1 root root 1359 Feb 28 21:59 ca.pem
-rw------- 1 root root 1679 Feb 28 22:17 kube-proxy-key.pem
-rw-r--r-- 1 root root 1403 Feb 28 22:17 kube-proxy.pem
-rw------- 1 root root 1675 Feb 28 22:09 server-key.pem
-rw-r--r-- 1 root root 1602 Feb 28 22:09 server.pem

4. Deploy the etcd Cluster

Note: only the master installation is demonstrated here; the two node machines are installed the same way. Just adjust etcd.conf accordingly, and copy the certificates over to the same directory on both nodes (a copy sketch follows).
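
A sketch of the copy step, assuming the /opt/kubernetes directory layout from section 4.1 has already been created on both nodes:

scp ~/ssl/ca*.pem ~/ssl/server*.pem root@10.100.4.197:/opt/kubernetes/ssl/
scp ~/ssl/ca*.pem ~/ssl/server*.pem root@10.100.4.198:/opt/kubernetes/ssl/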

Binary package download: https://github.com/coreos/etcd/releases/tag/v3.2.12

4.1 Create the installation directories

root@k8s-master:/usr/local/src # mkdir -pv /opt/kubernetes/{bin,cfg,ssl}

4.2 Download the etcd package

cd /usr/local/src/
wget https://github.com/coreos/etcd/releases/download/v3.2.12/etcd-v3.2.12-linux-amd64.tar.gz
tar xf etcd-v3.2.12-linux-amd64.tar.gz
ls etcd-v3.2.12-linux-amd64/
mv etcd-v3.2.12-linux-amd64/etcd* /opt/kubernetes/bin/

4.3 Create the etcd config file

Below is the etcd config file on k8s-master; on the two node machines only the member name and IP addresses need to change (an example for node01 follows the master config).

root@k8s-master:~ # vim /opt/kubernetes/cfg/etcd
#[Member]
ETCD_NAME="etcd01"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://10.100.4.195:2380"
ETCD_LISTEN_CLIENT_URLS="https://10.100.4.195:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.100.4.195:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://10.100.4.195:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://10.100.4.195:2380,etcd02=https://10.100.4.197:2380,etcd03=https://10.100.4.198:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
# save and exit

root@k8s-master:~ # mkdir /var/lib/etcd/
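
For reference, the same file on k8s-node01 would look like the following (only the member name and the local IPs change; k8s-node02 uses etcd03 and 10.100.4.198). Remember to create /var/lib/etcd/ on the nodes as well:

#[Member]
ETCD_NAME="etcd02"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://10.100.4.197:2380"
ETCD_LISTEN_CLIENT_URLS="https://10.100.4.197:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.100.4.197:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://10.100.4.197:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://10.100.4.195:2380,etcd02=https://10.100.4.197:2380,etcd03=https://10.100.4.198:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"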

4.4 Create the systemd unit file

root@k8s-master:~ # vim /usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=-/opt/kubernetes/cfg/etcd
ExecStart=/opt/kubernetes/bin/etcd \
--name=${ETCD_NAME} \
--data-dir=${ETCD_DATA_DIR} \
--listen-peer-urls=${ETCD_LISTEN_PEER_URLS} \
--listen-client-urls=${ETCD_LISTEN_CLIENT_URLS},http://127.0.0.1:2379 \
--advertise-client-urls=${ETCD_ADVERTISE_CLIENT_URLS} \
--initial-advertise-peer-urls=${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
--initial-cluster=${ETCD_INITIAL_CLUSTER} \
--initial-cluster-token=${ETCD_INITIAL_CLUSTER_TOKEN} \
--initial-cluster-state=new \
--cert-file=/opt/kubernetes/ssl/server.pem \
--key-file=/opt/kubernetes/ssl/server-key.pem \
--peer-cert-file=/opt/kubernetes/ssl/server.pem \
--peer-key-file=/opt/kubernetes/ssl/server-key.pem \
--trusted-ca-file=/opt/kubernetes/ssl/ca.pem \
--peer-trusted-ca-file=/opt/kubernetes/ssl/ca.pem
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

4.5 Copy the generated certificates into place

root@k8s-master:~ # cp ssl/ca*.pem ssl/server*.pem /opt/kubernetes/ssl/
root@k8s-master:~ # ls /opt/kubernetes/ssl/
ca-key.pem  ca.pem  server-key.pem  server.pem

4.6 Start etcd

Note: with a three-member initial cluster, the first member blocks on startup until a second member joins, so start etcd on the two nodes as well before concluding that something is wrong.

root@k8s-master:~ # systemctl daemon-reload
root@k8s-master:~ # systemctl enable etcd.service
Created symlink from /etc/systemd/system/multi-user.target.wants/etcd.service to /usr/lib/systemd/system/etcd.service.
root@k8s-master:~ # systemctl start etcd.service
root@k8s-master:~ # ps -ef|grep etcd

4.7 Verify the etcd cluster status

root@k8s-master:~ # vim /etc/profile.d/etcd.sh
export PATH=/opt/kubernetes/bin:$PATH
root@k8s-master:~ # source /etc/profile.d/etcd.sh

root@k8s-master:~ # etcdctl \
  --ca-file=/opt/kubernetes/ssl/ca.pem \
  --cert-file=/opt/kubernetes/ssl/server.pem \
  --key-file=/opt/kubernetes/ssl/server-key.pem \
  --endpoints="https://10.100.4.195:2379,https://10.100.4.197:2379,https://10.100.4.198:2379" \
  cluster-health

Each member should be reported as healthy, ending with a "cluster is healthy" line.

5. Deploy the Flannel Network

Note: flannel is installed only on the two node machines.

5.1 Download the binary package

cd /usr/local/src/
wget https://github.com/coreos/flannel/releases/download/v0.9.1/flannel-v0.9.1-linux-amd64.tar.gz

tar xf flannel-v0.9.1-linux-amd64.tar.gz
mv flanneld mk-docker-opts.sh /opt/kubernetes/bin/
ls /opt/kubernetes/bin/

5.2 Create the flannel config file

root@k8s-node1:~ # vim /opt/kubernetes/cfg/flanneld
FLANNEL_OPTIONS="--etcd-endpoints=https://10.100.4.195:2379,https://10.100.4.197:2379,https://10.100.4.198:2379 \
--etcd-cafile=/opt/kubernetes/ssl/ca.pem \
--etcd-certfile=/opt/kubernetes/ssl/server.pem \
--etcd-keyfile=/opt/kubernetes/ssl/server-key.pem"

5.3 Create the flanneld.service unit file

cat <<EOF >/usr/lib/systemd/system/flanneld.service
[Unit]
Description=Flanneld overlay address etcd agent
After=network-online.target network.target
Before=docker.service

[Service]
Type=notify
EnvironmentFile=/opt/kubernetes/cfg/flanneld
ExecStart=/opt/kubernetes/bin/flanneld --ip-masq \$FLANNEL_OPTIONS
ExecStartPost=/opt/kubernetes/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
Restart=on-failure

[Install]
WantedBy=multi-user.target

EOF

5.4 Modify the docker.service unit file

cat <<EOF >/usr/lib/systemd/system/docker.service

[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=/run/flannel/subnet.env
ExecStart=/usr/bin/dockerd  \$DOCKER_NETWORK_OPTIONS
ExecReload=/bin/kill -s HUP \$MAINPID
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
Delegate=yes
KillMode=process
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s

[Install]
WantedBy=multi-user.target

EOF

5.5 Start the flanneld service

1) Write the overlay network configuration into etcd for flanneld to use

# cd /opt/kubernetes/ssl
# /opt/kubernetes/bin/etcdctl \
--ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem \
--endpoints="https://10.100.4.195:2379,https://10.100.4.197:2379,https://10.100.4.198:2379" \
set /coreos.com/network/config '{ "Network": "172.17.0.0/16", "Backend": {"Type": "vxlan"}}'

Read the key back to confirm it was written:

/opt/kubernetes/bin/etcdctl \
--ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem \
--endpoints="https://10.100.4.195:2379,https://10.100.4.197:2379,https://10.100.4.198:2379" \
get /coreos.com/network/config

2) Start flanneld and restart docker

systemctl daemon-reload
systemctl enable flanneld
systemctl start flanneld
systemctl restart docker
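
After the restart, docker0 should have moved into the flannel subnet assigned to that node; a quick check on each node:

cat /run/flannel/subnet.env
ip -4 addr show flannel.1
ip -4 addr show docker0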

3) Check the flannel keys in etcd

# cd /opt/kubernetes/ssl
/opt/kubernetes/bin/etcdctl \
--ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem \
--endpoints="https://10.100.4.195:2379,https://10.100.4.197:2379,https://10.100.4.198:2379" \
ls /coreos.com/network/

List the subnets flannel has recorded in etcd:

root@k8s-master:/opt/kubernetes/ssl # /opt/kubernetes/bin/etcdctl --ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem --endpoints="https://10.100.4.195:2379,https://10.100.4.197:2379,https://10.100.4.198:2379" ls /coreos.com/network/subnets
/coreos.com/network/subnets/172.17.49.0-24
/coreos.com/network/subnets/172.17.95.0-24

The subnet records also show which node each subnet was assigned to:

root@k8s-master:/opt/kubernetes/ssl # /opt/kubernetes/bin/etcdctl --ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem --endpoints="https://10.100.4.195:2379,https://10.100.4.197:2379,https://10.100.4.198:2379" get /coreos.com/network/subnets/172.17.49.0-24
{"PublicIP":"10.100.4.198","BackendType":"vxlan","BackendData":{"VtepMAC":"ba:68:02:e1:c7:76"}}

6. Create the kubeconfig Files

Processes running on the node machines, such as kubelet and kube-proxy, must authenticate and be authorized when communicating with the kube-apiserver process on the master.

6.1 Install the kubectl admin tool

Note: install this on the master, and download the package that matches your Kubernetes version.

wget https://dl.k8s.io/v1.9.0/kubernetes-client-linux-amd64.tar.gz
tar xf kubernetes-client-linux-amd64.tar.gz 
cp kubernetes/client/bin/kubectl /opt/kubernetes/bin/
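
Verify kubectl runs (assuming /opt/kubernetes/bin is on the PATH from the etcd step):

kubectl version --client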

6.2 Create the TLS Bootstrapping Token

The columns in the generated token.csv are: token, user name, user ID, and group.

export BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')

cat > token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF

6.3 Create the kubelet bootstrapping kubeconfig file

root@k8s-master:~ # cd ssl/

root@k8s-master:~ # export KUBE_APISERVER="https://10.100.4.195:6443"

# Set cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=./ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=bootstrap.kubeconfig

# Set client authentication parameters
kubectl config set-credentials kubelet-bootstrap \
  --token=${BOOTSTRAP_TOKEN} \
  --kubeconfig=bootstrap.kubeconfig

# Set context parameters
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kubelet-bootstrap \
  --kubeconfig=bootstrap.kubeconfig

# Switch to the default context
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
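
Optionally inspect the result; kubectl pretty-prints the file and redacts the embedded credentials:

kubectl config view --kubeconfig=bootstrap.kubeconfig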

6.4 Create the kube-proxy kubeconfig file

# Set cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=./ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-proxy.kubeconfig

# Set client authentication parameters
kubectl config set-credentials kube-proxy \
  --client-certificate=./kube-proxy.pem \
  --client-key=./kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig
  
# Set context parameters
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-proxy \
  --kubeconfig=kube-proxy.kubeconfig

# Switch to the default context
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

7. Deploy the Master Node

Binary package download: https://storage.googleapis.com/kubernetes-release-dashpole/release/v1.9.0-beta.2/kubernetes-server-linux-amd64.tar.gz

root@k8s-master:/usr/local/src # tar xf kubernetes-server-linux-amd64.tar.gz 
root@k8s-master:/usr/local/src # cd kubernetes
root@k8s-master:/usr/local/src/kubernetes # tar xf kubernetes-src.tar.gz 
root@k8s-master:/usr/local/src/kubernetes # cp -r server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler,kubectl,kube-proxy,kubelet} /opt/kubernetes/bin/

7.1 Create the apiserver deployment script

The apiserver.sh script takes two arguments:

  1. the IP address of the master node;
  2. the etcd cluster endpoints.

root@k8s-master:~/master_pkg # cat apiserver.sh 
#!/bin/bash

MASTER_ADDRESS=${1:-"10.100.4.195"}
ETCD_SERVERS=${2:-"http://127.0.0.1:2379"}

cat <<EOF >/opt/kubernetes/cfg/kube-apiserver

KUBE_APISERVER_OPTS="--logtostderr=true \\
--v=4 \\
--etcd-servers=${ETCD_SERVERS} \\
--insecure-bind-address=127.0.0.1 \\
--bind-address=${MASTER_ADDRESS} \\
--insecure-port=8080 \\
--secure-port=6443 \\
--advertise-address=${MASTER_ADDRESS} \\
--allow-privileged=true \\
--service-cluster-ip-range=10.10.10.0/24 \\
--admission-control=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota,NodeRestriction \\
--authorization-mode=RBAC,Node \\
--kubelet-https=true \\
--enable-bootstrap-token-auth \\
--token-auth-file=/opt/kubernetes/cfg/token.csv \\
--service-node-port-range=30000-50000 \\
--tls-cert-file=/opt/kubernetes/ssl/server.pem  \\
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \\
--client-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--etcd-cafile=/opt/kubernetes/ssl/ca.pem \\
--etcd-certfile=/opt/kubernetes/ssl/server.pem \\
--etcd-keyfile=/opt/kubernetes/ssl/server-key.pem"

EOF

cat <<EOF >/usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-apiserver
ExecStart=/opt/kubernetes/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

Run the script:

root@k8s-master:~ # cp token.csv /opt/kubernetes/cfg/
root@k8s-master:~/master_pkg # bash apiserver.sh 10.100.4.195 https://10.100.4.195:2379,https://10.100.4.197:2379,https://10.100.4.198:2379

Start the kube-apiserver service:

systemctl daemon-reload
systemctl enable kube-apiserver
systemctl start kube-apiserver

7.2 Create the controller-manager deployment script

root@k8s-master:~/master_pkg # cat controller-manager.sh 
#!/bin/bash

MASTER_ADDRESS=${1:-"127.0.0.1"}

cat <<EOF >/opt/kubernetes/cfg/kube-controller-manager


KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=true \\
--v=4 \\
--master=${MASTER_ADDRESS}:8080 \\
--leader-elect=true \\
--address=127.0.0.1 \\
--service-cluster-ip-range=10.10.10.0/24 \\
--cluster-name=kubernetes \\
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem  \\
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--root-ca-file=/opt/kubernetes/ssl/ca.pem"

EOF

cat <<EOF >/usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-controller-manager
ExecStart=/opt/kubernetes/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

Run the script:

root@k8s-master:~/master_pkg # bash controller-manager.sh 127.0.0.1

Start the kube-controller-manager service:

systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl start kube-controller-manager

7.3 Create the scheduler.sh script

root@k8s-master:~/master_pkg # cat scheduler.sh 
#!/bin/bash

MASTER_ADDRESS=${1:-"127.0.0.1"}

cat <<EOF >/opt/kubernetes/cfg/kube-scheduler

KUBE_SCHEDULER_OPTS="--logtostderr=true \\
--v=4 \\
--master=${MASTER_ADDRESS}:8080 \\
--leader-elect"

EOF

cat <<EOF >/usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-scheduler
ExecStart=/opt/kubernetes/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

Run the script:

root@k8s-master:~/master_pkg # bash scheduler.sh 127.0.0.1

Start the kube-scheduler service:

systemctl daemon-reload
systemctl enable kube-scheduler
systemctl start kube-scheduler

7.4 Check that the three master components are running

root@k8s-master:~/master_pkg # ps -ef|egrep "kube-apiserver|kube-controller-manager|kube-scheduler"

All three processes should appear in the output.

7.5 View cluster status

root@k8s-master:~/master_pkg # kubectl get cs
NAME                 STATUS    MESSAGE              ERROR
controller-manager   Healthy   ok                   
scheduler            Healthy   ok                   
etcd-0               Healthy   {"health": "true"}   
etcd-2               Healthy   {"health": "true"}   
etcd-1               Healthy   {"health": "true"}   

8. Deploy the Node Machines

8.1 Copy the kubeconfig files to the nodes

The same files go to both nodes:

scp kube-proxy.kubeconfig root@10.100.4.197:/opt/kubernetes/cfg/
scp kube-proxy.kubeconfig root@10.100.4.198:/opt/kubernetes/cfg/

scp bootstrap.kubeconfig root@10.100.4.197:/opt/kubernetes/cfg/
scp bootstrap.kubeconfig root@10.100.4.198:/opt/kubernetes/cfg/

8.2 Install kubelet and kube-proxy

The steps are identical on both nodes; the binary package is the same one used for the master.

tar xf kubernetes-server-linux-amd64.tar.gz 
mv kubernetes/server/bin/kubelet /opt/kubernetes/bin/
mv kubernetes/server/bin/kube-proxy /opt/kubernetes/bin/

chmod +x /opt/kubernetes/bin/*
ls /opt/kubernetes/bin/
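
A quick version check confirms the binaries run:

/opt/kubernetes/bin/kubelet --version
/opt/kubernetes/bin/kube-proxy --version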

8.3 Create the kubelet.sh deployment script

The kubelet.sh script takes two arguments: the current node's IP address, and the cluster DNS address, 10.10.10.2. Do not change this DNS address; DNS will be deployed to it later.

1) k8s-node1 script

root@k8s-node1:~/node_pkg # cat kubelet.sh 
#!/bin/bash

NODE_ADDRESS=${1:-"10.100.4.197"}
DNS_SERVER_IP=${2:-"10.10.10.2"}

cat <<EOF >/opt/kubernetes/cfg/kubelet

KUBELET_OPTS="--logtostderr=true \\
--v=4 \\
--address=${NODE_ADDRESS} \\
--hostname-override=${NODE_ADDRESS} \\
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \\
--experimental-bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \\
--cert-dir=/opt/kubernetes/ssl \\
--allow-privileged=true \\
--cluster-dns=${DNS_SERVER_IP} \\
--cluster-domain=cluster.local \\
--fail-swap-on=false \\
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"

EOF

cat <<EOF >/usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kubelet
ExecStart=/opt/kubernetes/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
KillMode=process

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kubelet
systemctl restart kubelet

2) k8s-node2 script

root@k8s-node2:~/node_pkg # cat kubelet.sh 
#!/bin/bash

NODE_ADDRESS=${1:-"10.100.4.198"}
DNS_SERVER_IP=${2:-"10.10.10.2"}

cat <<EOF >/opt/kubernetes/cfg/kubelet

KUBELET_OPTS="--logtostderr=true \\
--v=4 \\
--address=${NODE_ADDRESS} \\
--hostname-override=${NODE_ADDRESS} \\
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \\
--experimental-bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \\
--cert-dir=/opt/kubernetes/ssl \\
--allow-privileged=true \\
--cluster-dns=${DNS_SERVER_IP} \\
--cluster-domain=cluster.local \\
--fail-swap-on=false \\
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"

EOF

cat <<EOF >/usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kubelet
ExecStart=/opt/kubernetes/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
KillMode=process

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kubelet
systemctl restart kubelet

1) Run the script on k8s-node1:

root@k8s-node1:~/node_pkg # bash kubelet.sh 10.100.4.197 10.10.10.2

2) Run the script on k8s-node2:

root@k8s-node2:~/node_pkg # bash kubelet.sh 10.100.4.198 10.10.10.2

Note: if kubelet fails to start and /var/log/messages contains the following error:

Feb 24 09:56:21 docker-node1-4-197 kubelet: error: failed to run Kubelet: cannot create certificate signing request: certificatesigningrequests.certificates.k8s.io is forbidden: User "kubelet-bootstrap" cannot create certificatesigningrequests.certificates.k8s.io at the cluster scope

Fix: run the following command on the master node:

kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap

8.4 Create the proxy.sh deployment script

On both nodes, the proxy.sh script takes a single argument: the current node's IP address.

root@k8s-node1:~/node_pkg # cat proxy.sh 
#!/bin/bash

NODE_ADDRESS=${1:-"192.168.1.200"}

cat <<EOF >/opt/kubernetes/cfg/kube-proxy

KUBE_PROXY_OPTS="--logtostderr=true \\
--v=4 \\
--hostname-override=${NODE_ADDRESS} \\
--kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig"

EOF

cat <<EOF >/usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Proxy
After=network.target

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-proxy
ExecStart=/opt/kubernetes/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-proxy
systemctl restart kube-proxy

1) Run the script on k8s-node1:

root@k8s-node1:~/node_pkg # bash proxy.sh 10.100.4.197

2) Run the script on k8s-node2:

root@k8s-node2:~/node_pkg # bash proxy.sh 10.100.4.198

8.5 Approve the certificate requests

On the master, check whether the certificate signing requests from the nodes have arrived:

root@k8s-master:~ # kubectl get csr
NAME                                                   AGE       REQUESTOR           CONDITION
node-csr-3sNkMcmeOvuvp1zoxqBwQ3zxyjpOuovykDjacEOAvpM   15m       kubelet-bootstrap   Pending
node-csr-4CYGuEYqtOgZ4FWN-KB5a7-hIN1i9Gzo6SKdU6zIzbs   15m       kubelet-bootstrap   Pending

Approve the requests:

root@k8s-master:~ # kubectl certificate approve node-csr-3sNkMcmeOvuvp1zoxqBwQ3zxyjpOuovykDjacEOAvpM
root@k8s-master:~ #  kubectl certificate approve node-csr-4CYGuEYqtOgZ4FWN-KB5a7-hIN1i9Gzo6SKdU6zIzbs

On the master, list the nodes that have joined the cluster:

root@k8s-master:~ # kubectl get node
NAME           STATUS    ROLES     AGE       VERSION
10.100.4.197   Ready     <none>    40s       v1.9.0
10.100.4.198   Ready     <none>    40s       v1.9.0

9. Create a Sample Application

9.1 Create an nginx deployment

root@k8s-master:~ # kubectl run nginx --image=nginx --replicas=3 --port=80
deployment "nginx" created

9.2 View the pods

root@k8s-master:~ # kubectl get pod -o wide
NAME                     READY     STATUS    RESTARTS   AGE       IP            NODE
nginx-7587c6fdb6-cpdd9   1/1       Running   0          1m        172.17.49.2   10.100.4.198
nginx-7587c6fdb6-glpmc   1/1       Running   0          1m        172.17.49.3   10.100.4.198
nginx-7587c6fdb6-hqsq7   1/1       Running   0          1m        172.17.95.2   10.100.4.197

9.3 Expose the nginx port

root@k8s-master:~ # kubectl expose deployment nginx --port=88 --target-port=80 --type=NodePort
service "nginx" exposed

root@k8s-master:~ # kubectl get svc
NAME         TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP   10.10.10.1     <none>        443/TCP        2h
nginx        NodePort    10.10.10.244   <none>        88:42055/TCP   5s
root@k8s-master:~ # kubectl describe svc nginx
Name:                     nginx
Namespace:                default
Labels:                   run=nginx
Annotations:              <none>
Selector:                 run=nginx
Type:                     NodePort
IP:                       10.10.10.244
Port:                     <unset>  88/TCP
TargetPort:               80/TCP
NodePort:                 <unset>  42055/TCP
Endpoints:                172.17.49.2:80,172.17.49.3:80,172.17.95.2:80
Session Affinity:         None
External Traffic Policy:  Cluster
Events:                   <none>

Notes:
10.10.10.244 is the virtual IP assigned to the nginx service;
port 88 is used for access from inside the cluster;
port 42055 is used for access from outside the cluster.

Test access from the node1 machine (see the example below).
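
For example, using a node IP and the NodePort from the svc output above:

curl http://10.100.4.197:42055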

10. Deploy the Web UI (Dashboard)

Deploy the dashboard from the master node.

10.1 Create dashboard-rbac.yaml

root@k8s-master:~ # mkdir k8sUI
root@k8s-master:~ # cd k8sUI/
root@k8s-master:~/k8sUI # cat dashboard-rbac.yaml 
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
  name: kubernetes-dashboard
  namespace: kube-system
---

kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kube-system

Create the resources:

root@k8s-master:~/k8sUI # kubectl create -f dashboard-rbac.yaml 
serviceaccount "kubernetes-dashboard" created
clusterrolebinding "kubernetes-dashboard-minimal" created

10.2 Create dashboard-deployment.yaml

root@k8s-master:~/k8sUI # cat dashboard-deployment.yaml 
apiVersion: apps/v1beta2
kind: Deployment
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      serviceAccountName: kubernetes-dashboard
      containers:
      - name: kubernetes-dashboard
        image: registry.cn-hangzhou.aliyuncs.com/google_containers/kubernetes-dashboard-amd64:v1.7.1
        resources:
          limits:
            cpu: 100m
            memory: 300Mi
          requests:
            cpu: 100m
            memory: 100Mi
        ports:
        - containerPort: 9090
          protocol: TCP
        livenessProbe:
          httpGet:
            scheme: HTTP
            path: /
            port: 9090
          initialDelaySeconds: 30
          timeoutSeconds: 30
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"

Create the resource:

root@k8s-master:~/k8sUI # kubectl create -f dashboard-deployment.yaml 
deployment "kubernetes-dashboard" created

10.3 Create dashboard-service.yaml

root@k8s-master:~/k8sUI # cat dashboard-service.yaml 
apiVersion: v1
kind: Service
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  type: NodePort
  selector:
    k8s-app: kubernetes-dashboard
  ports:
  - port: 80
    targetPort: 9090

Create the resource:

root@k8s-master:~/k8sUI # kubectl create -f dashboard-service.yaml 
service "kubernetes-dashboard" created

10.4 Check the dashboard status

root@k8s-master:~/k8sUI # kubectl get all -n kube-system
NAME                          DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
deploy/kubernetes-dashboard   1         1         1            1           18m

NAME                                 DESIRED   CURRENT   READY     AGE
rs/kubernetes-dashboard-698bb888c5   1         1         1         18m

NAME                                       READY     STATUS    RESTARTS   AGE
po/kubernetes-dashboard-698bb888c5-w2pjl   1/1       Running   8          18m

NAME                       TYPE       CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE
svc/kubernetes-dashboard   NodePort   10.10.10.224   <none>        80:48563/TCP   18m

Once the pod is in the Running state, the dashboard UI can be reached through the randomly assigned NodePort, 48563 above.

In a browser, enter any node's IP address plus that port (example below).
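
For example, with the NodePort shown above:

http://10.100.4.197:48563/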

11. Deploy kube-dns

root@k8s-master:~/k8sDNS # cat kube-dns.yaml 
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Should keep target in cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml
# in sync with this file.

# Warning: This is a file generated from the base underscore template file: kube-dns.yaml.base

apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "KubeDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.10.10.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  # replicas: not specified here:
  # 1. In order to make Addon Manager do not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    rollingUpdate:
      maxSurge: 10%
      maxUnavailable: 0
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      volumes:
      - name: kube-dns-config
        configMap:
          name: kube-dns
          optional: true
      containers:
      - name: kubedns
        image: registry.cn-hangzhou.aliyuncs.com/google_containers/k8s-dns-kube-dns-amd64:1.14.7
        resources:
          # TODO: Set memory limits when we've profiled the container for large
          # clusters, then set request = limit to keep this container in
          # guaranteed class. Currently, this container falls into the
          # "burstable" category so the kubelet doesn't backoff from restarting it.
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        livenessProbe:
          httpGet:
            path: /healthcheck/kubedns
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /readiness
            port: 8081
            scheme: HTTP
          # we poll on pod startup for the Kubernetes master service and
          # only setup the /readiness HTTP server once that's available.
          initialDelaySeconds: 3
          timeoutSeconds: 5
        args:
        - --domain=cluster.local.
        - --dns-port=10053
        - --config-dir=/kube-dns-config
        - --v=2
        env:
        - name: PROMETHEUS_PORT
          value: "10055"
        ports:
        - containerPort: 10053
          name: dns-local
          protocol: UDP
        - containerPort: 10053
          name: dns-tcp-local
          protocol: TCP
        - containerPort: 10055
          name: metrics
          protocol: TCP
        volumeMounts:
        - name: kube-dns-config
          mountPath: /kube-dns-config
      - name: dnsmasq
        image: registry.cn-hangzhou.aliyuncs.com/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.7
        livenessProbe:
          httpGet:
            path: /healthcheck/dnsmasq
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - -v=2
        - -logtostderr
        - -configDir=/etc/k8s/dns/dnsmasq-nanny
        - -restartDnsmasq=true
        - --
        - -k
        - --cache-size=1000
        - --no-negcache
        - --log-facility=-
        - --server=/cluster.local/127.0.0.1#10053
        - --server=/in-addr.arpa/127.0.0.1#10053
        - --server=/ip6.arpa/127.0.0.1#10053
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        # see: https://github.com/kubernetes/kubernetes/issues/29055 for details
        resources:
          requests:
            cpu: 150m
            memory: 20Mi
        volumeMounts:
        - name: kube-dns-config
          mountPath: /etc/k8s/dns/dnsmasq-nanny
      - name: sidecar
        image: registry.cn-hangzhou.aliyuncs.com/google_containers/k8s-dns-sidecar-amd64:1.14.7
        livenessProbe:
          httpGet:
            path: /metrics
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - --v=2
        - --logtostderr
        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,SRV
        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,SRV
        ports:
        - containerPort: 10054
          name: metrics
          protocol: TCP
        resources:
          requests:
            memory: 20Mi
            cpu: 10m
      dnsPolicy: Default  # Don't use cluster DNS.
      serviceAccountName: kube-dns

Create kube-dns:

root@k8s-master:~/k8sDNS # kubectl create -f kube-dns.yaml 
service "kube-dns" created
serviceaccount "kube-dns" created
configmap "kube-dns" created
deployment "kube-dns" created

Check that kube-dns was created successfully:

root@k8s-master:~/k8sDNS # kubectl get all -n kube-system
NAME                          DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
deploy/kube-dns               1         1         1            1           1m
deploy/kubernetes-dashboard   1         1         1            1           8h

NAME                                 DESIRED   CURRENT   READY     AGE
rs/kube-dns-9d8b5fb76                1         1         1         1m
rs/kubernetes-dashboard-698bb888c5   1         1         1         8h

NAME                                       READY     STATUS    RESTARTS   AGE
po/kube-dns-9d8b5fb76-xtdsh                3/3       Running   0          1m
po/kubernetes-dashboard-698bb888c5-w2pjl   1/1       Running   8          8h

NAME                       TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)         AGE
svc/kube-dns               ClusterIP   10.10.10.2     <none>        53/UDP,53/TCP   1m
svc/kubernetes-dashboard   NodePort    10.10.10.224   <none>        80:48563/TCP    8h

Create a busybox container to test DNS resolution:

root@k8s-master:~/k8sDNS # kubectl run busybox --rm -ti --image=busybox /bin/sh
If you don't see a command prompt, try pressing enter.
# Use nslookup to resolve the kubernetes service
/ # nslookup kubernetes
Server:    10.10.10.2
Address 1: 10.10.10.2 kube-dns.kube-system.svc.cluster.local

Name:      kubernetes
Address 1: 10.10.10.1 kubernetes.default.svc.cluster.local

# Then resolve my-service, a service we created separately
/ # nslookup my-service
Server:    10.10.10.2
Address 1: 10.10.10.2 kube-dns.kube-system.svc.cluster.local

Name:      my-service
Address 1: 10.10.10.172 my-service.default.svc.cluster.local
