## An Introduction to K8S



### Role Assignment

| Hostname | IP | Required components |
| --- | --- | --- |
| k8s-master | 192.168.10.110 | etcd, kube-apiserver, kube-controller-manager, kube-scheduler |
| k8s-node1 | 192.168.10.111 | etcd, kubelet, docker, kube-proxy |
| k8s-node2 | 192.168.10.112 | etcd, kubelet, docker, kube-proxy |

### Master Deployment

Download the software

To be added later.

Install cfssl

mkdir k8s && cd k8s
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
chmod +x *
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
mv cfssl-certinfo_linux-amd64 /usr/bin/cfssl-certinfo

Create the etcd certificates

mkdir /k8s/etcd/{bin,cfg,ssl} -p
mkdir /k8s/kubernetes/{bin,cfg,ssl} -p
cd /k8s/etcd/ssl/

etcd CA configuration

cat << EOF | tee ca-config.json
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "etcd": {
         "expiry": "87600h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF

etcd CA certificate signing request (CSR)

cat << EOF | tee ca-csr.json
{
    "CN": "etcd CA",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing"
        }
    ]
}
EOF

etcd server certificate signing request

cat << EOF | tee server-csr.json
{
    "CN": "etcd",
    "hosts": [
    "192.168.10.110",
    "192.168.10.111",
    "192.168.10.112"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing"
        }
    ]
}
EOF

Generate the etcd CA certificate and private key (initialize the CA)

[root@k8s-master ssl]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca 
2019/10/10 11:37:46 [INFO] generating a new CA key and certificate from CSR
2019/10/10 11:37:46 [INFO] generate received request
2019/10/10 11:37:46 [INFO] received CSR
2019/10/10 11:37:46 [INFO] generating key: rsa-2048
2019/10/10 11:37:46 [INFO] encoded CSR
2019/10/10 11:37:46 [INFO] signed certificate with serial number 715175595181257971019616552202590936167014535469
[root@k8s-master ssl]# ls
ca-config.json  ca.csr  ca-csr.json  ca-key.pem  ca.pem  server-csr.json

Generate the server certificate

[root@k8s-master ssl]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=etcd server-csr.json | cfssljson -bare server
2019/10/10 11:50:38 [INFO] generate received request
2019/10/10 11:50:38 [INFO] received CSR
2019/10/10 11:50:38 [INFO] generating key: rsa-2048
2019/10/10 11:50:39 [INFO] encoded CSR
2019/10/10 11:50:39 [INFO] signed certificate with serial number 365983290503647280279808226524681959437054059343
2019/10/10 11:50:39 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@k8s-master ssl]# ls
ca-config.json  ca.csr  ca-csr.json  ca-key.pem  ca.pem  server.csr  server-csr.json  server-key.pem  server.pem
[root@k8s-master ssl]# 

Install etcd

Extract the archive & copy the binaries

[root@k8s-master k8s]# ls
etcd-v3.2.12-linux-amd64.tar.gz
[root@k8s-master k8s]# tar zxvf etcd-v3.2.12-linux-amd64.tar.gz 
[root@k8s-master k8s]# cd etcd-v3.2.12-linux-amd64/
[root@k8s-master etcd-v3.2.12-linux-amd64]# ls
Documentation  etcd  etcdctl  README-etcdctl.md  README.md  READMEv2-etcdctl.md
[root@k8s-master etcd-v3.2.12-linux-amd64]# cp etcd etcdctl /k8s/etcd/bin

Configure the main etcd configuration file

vim /k8s/etcd/cfg/etcd.conf   
#[Member]
ETCD_NAME="etcd01"
ETCD_DATA_DIR="/data1/etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.10.110:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.10.110:2379"
 
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.10.110:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.10.110:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://192.168.10.110:2380,etcd02=https://192.168.10.111:2380,etcd03=https://192.168.10.112:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"

#[Security]
ETCD_CERT_FILE="/k8s/etcd/ssl/server.pem"
ETCD_KEY_FILE="/k8s/etcd/ssl/server-key.pem"
ETCD_TRUSTED_CA_FILE="/k8s/etcd/ssl/ca.pem"
ETCD_CLIENT_CERT_AUTH="true"
ETCD_PEER_CERT_FILE="/k8s/etcd/ssl/server.pem"
ETCD_PEER_KEY_FILE="/k8s/etcd/ssl/server-key.pem"
ETCD_PEER_TRUSTED_CA_FILE="/k8s/etcd/ssl/ca.pem"
ETCD_PEER_CLIENT_CERT_AUTH="true"

Configure the etcd systemd unit file

mkdir -p /data1/etcd
vim /usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=simple
WorkingDirectory=/data1/etcd/
EnvironmentFile=-/k8s/etcd/cfg/etcd.conf
# set GOMAXPROCS to number of processors
ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /k8s/etcd/bin/etcd --name=\"${ETCD_NAME}\" --data-dir=\"${ETCD_DATA_DIR}\" --listen-client-urls=\"${ETCD_LISTEN_CLIENT_URLS}\" --listen-peer-urls=\"${ETCD_LISTEN_PEER_URLS}\" --advertise-client-urls=\"${ETCD_ADVERTISE_CLIENT_URLS}\" --initial-cluster-token=\"${ETCD_INITIAL_CLUSTER_TOKEN}\" --initial-cluster=\"${ETCD_INITIAL_CLUSTER}\" --initial-cluster-state=\"${ETCD_INITIAL_CLUSTER_STATE}\" --cert-file=\"${ETCD_CERT_FILE}\" --key-file=\"${ETCD_KEY_FILE}\" --trusted-ca-file=\"${ETCD_TRUSTED_CA_FILE}\" --client-cert-auth=\"${ETCD_CLIENT_CERT_AUTH}\" --peer-cert-file=\"${ETCD_PEER_CERT_FILE}\" --peer-key-file=\"${ETCD_PEER_KEY_FILE}\" --peer-trusted-ca-file=\"${ETCD_PEER_TRUSTED_CA_FILE}\" --peer-client-cert-auth=\"${ETCD_PEER_CLIENT_CERT_AUTH}\""
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

Configure etcd on the node machines

The node machines also need the etcd service installed, since we are building an etcd cluster.
The configuration is almost identical; only ETCD_LISTEN_PEER_URLS, ETCD_LISTEN_CLIENT_URLS, ETCD_INITIAL_ADVERTISE_PEER_URLS and ETCD_ADVERTISE_CLIENT_URLS in /k8s/etcd/cfg/etcd.conf need to be changed to the node's own address.

ETCD_NAME also needs to be changed (see the sketch below).
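For example, on k8s-node1 (192.168.10.111) the changed part of /k8s/etcd/cfg/etcd.conf would look roughly like this, assuming the same /k8s/etcd directory layout as on the master (ETCD_INITIAL_CLUSTER, ETCD_INITIAL_CLUSTER_TOKEN, ETCD_INITIAL_CLUSTER_STATE and the [Security] block stay exactly as on the master):

#[Member]
ETCD_NAME="etcd02"
ETCD_DATA_DIR="/data1/etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.10.111:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.10.111:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.10.111:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.10.111:2379"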

One more point: the certificates generated with cfssl on the master also need to be copied to the node machines (the /k8s/etcd/{bin,cfg,ssl} directories must already exist on the nodes).

[root@k8s-master ssl]# pwd
/k8s/etcd/ssl
scp * root@192.168.10.111:$(pwd)
scp * root@192.168.10.112:$(pwd)

Start etcd

# Note: before this step, finish the etcd configuration on the Node machines and start their etcd services at the same time
systemctl daemon-reload
systemctl enable etcd
systemctl start etcd

Service checks

Check the listening ports
master:
[root@k8s-master ~]# ss -ntl |grep -w "192.168.10.110"
LISTEN     0      128    192.168.10.110:2379                     *:*                  
LISTEN     0      128    192.168.10.110:2380                     *:*                  

node1:
[root@k8s-node1 ~]# ss -ntl |grep -w "192.168.10.111"
LISTEN     0      128    192.168.10.111:2379                     *:*                  
LISTEN     1      128    192.168.10.111:2380                     *:*    

node2:
[root@k8s-node2 ~]# ss -ntl |grep -w "192.168.10.112"
LISTEN     0      128    192.168.10.112:2379                     *:*                  
LISTEN     0      128    192.168.10.112:2380                     *:*  

Health check

[root@k8s-master ~]#  /k8s/etcd/bin/etcdctl --ca-file=/k8s/etcd/ssl/ca.pem --cert-file=/k8s/etcd/ssl/server.pem --key-file=/k8s/etcd/ssl/server-key.pem --endpoints="https://192.168.10.110:2379,https://192.168.10.111:2379,https://192.168.10.112:2379" cluster-health

(screenshot: etcd cluster health output)

Generate the kubernetes certificates and private keys

Create the kubernetes CA certificate

cd /k8s/kubernetes/ssl
cat << EOF | tee ca-config.json
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
         "expiry": "87600h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF
cat << EOF | tee ca-csr.json
{
    "CN": "kubernetes",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF
[root@k8s-master ssl]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
[root@k8s-master ssl]# ls
ca-config.json  ca.csr  ca-csr.json  ca-key.pem  ca.pem


Create the apiserver certificate

cat << EOF | tee server-csr.json
{
    "CN": "kubernetes",
    "hosts": [
      "10.254.0.1",
      "127.0.0.1",
      "192.168.10.110",
      "192.168.10.111",
      "192.168.10.112",
      "kubernetes",
      "kubernetes.default",
      "kubernetes.default.svc",
      "kubernetes.default.svc.cluster",
      "kubernetes.default.svc.cluster.local"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF
[root@k8s-master ssl]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server
[root@k8s-master ssl]# ls
ca-config.json  ca.csr  ca-csr.json  ca-key.pem  ca.pem  server.csr  server-csr.json  server-key.pem  server.pem
[root@k8s-master ssl]# 

Create the kube-proxy certificate

cat << EOF | tee kube-proxy-csr.json
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Beijing",
      "ST": "Beijing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
[root@k8s-master ssl]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
[root@k8s-master ssl]# ls
ca-config.json  ca-csr.json  ca.pem          kube-proxy-csr.json  kube-proxy.pem  server-csr.json  server.pem
ca.csr          ca-key.pem   kube-proxy.csr  kube-proxy-key.pem   server.csr      server-key.pem
[root@k8s-master ssl]# 

Deploy the kubernetes server components

The kubernetes master node runs the following components: kube-apiserver, kube-scheduler and kube-controller-manager. kube-scheduler and kube-controller-manager can run in cluster mode: leader election picks one working process while the other processes block, which is what makes a highly available three-master setup possible.

Extract the files

tar -zxvf kubernetes-server-linux-amd64.tar.gz 
cd kubernetes/server/bin/
cp kube-scheduler kube-apiserver kube-controller-manager kubectl /k8s/kubernetes/bin/

Deploy the kube-apiserver component: create the TLS Bootstrapping token

[root@k8s-master bin]# head -c 16 /dev/urandom | od -An -t x | tr -d ' '
f2c50331f07be89278acdaf341ff1ecc
vim /k8s/kubernetes/cfg/token.csv
f2c50331f07be89278acdaf341ff1ecc,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
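The line follows the CSV layout expected by kube-apiserver's --token-auth-file: token, user name, user UID, then the user's groups. Keep this token: the same value has to be reused later as BOOTSTRAP_TOKEN in environment.sh on the nodes. A minimal sketch of generating and writing it in one step (the BOOTSTRAP_TOKEN variable name is only for illustration):

BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
echo "${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,\"system:kubelet-bootstrap\"" > /k8s/kubernetes/cfg/token.csv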

Create the apiserver configuration file

vim /k8s/kubernetes/cfg/kube-apiserver 
KUBE_APISERVER_OPTS="--logtostderr=true \
--v=4 \
--etcd-servers=https://192.168.10.110:2379,https://192.168.10.111:2379,https://192.168.10.112:2379 \
--bind-address=192.168.10.110 \
--secure-port=6443 \
--advertise-address=192.168.10.110 \
--allow-privileged=true \
--service-cluster-ip-range=10.254.0.0/16 \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \
--authorization-mode=RBAC,Node \
--enable-bootstrap-token-auth \
--token-auth-file=/k8s/kubernetes/cfg/token.csv \
--service-node-port-range=30000-50000 \
--tls-cert-file=/k8s/kubernetes/ssl/server.pem  \
--tls-private-key-file=/k8s/kubernetes/ssl/server-key.pem \
--client-ca-file=/k8s/kubernetes/ssl/ca.pem \
--service-account-key-file=/k8s/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/k8s/etcd/ssl/ca.pem \
--etcd-certfile=/k8s/etcd/ssl/server.pem \
--etcd-keyfile=/k8s/etcd/ssl/server-key.pem"

Create the apiserver systemd unit file

vim /usr/lib/systemd/system/kube-apiserver.service 
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
 
[Service]
EnvironmentFile=-/k8s/kubernetes/cfg/kube-apiserver
ExecStart=/k8s/kubernetes/bin/kube-apiserver $KUBE_APISERVER_OPTS
Restart=on-failure
 
[Install]
WantedBy=multi-user.target

Start the service

systemctl daemon-reload
systemctl enable kube-apiserver
systemctl start kube-apiserver
systemctl status kube-apiserver

(screenshot: kube-apiserver service status)

Deploy the kube-scheduler component: create the kube-scheduler configuration file
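The systemd unit below reads /k8s/kubernetes/cfg/kube-scheduler as its EnvironmentFile; a minimal sketch of that file, assuming the scheduler reaches the local apiserver over the insecure 127.0.0.1:8080 port (the same assumption the kube-controller-manager configuration below makes):

vim /k8s/kubernetes/cfg/kube-scheduler

KUBE_SCHEDULER_OPTS="--logtostderr=true \
--v=4 \
--master=127.0.0.1:8080 \
--leader-elect"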

Create the kube-scheduler systemd unit file

vim /usr/lib/systemd/system/kube-scheduler.service
 
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
 
[Service]
EnvironmentFile=-/k8s/kubernetes/cfg/kube-scheduler
ExecStart=/k8s/kubernetes/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
Restart=on-failure
 
[Install]
WantedBy=multi-user.target

Start the service

systemctl daemon-reload
systemctl enable kube-scheduler.service 
systemctl start kube-scheduler.service
systemctl status kube-scheduler.service

(screenshot: kube-scheduler service status)

Deploy the kube-controller-manager component: create the kube-controller-manager configuration file

vim /k8s/kubernetes/cfg/kube-controller-manager

KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=true \
--v=4 \
--master=127.0.0.1:8080 \
--leader-elect=true \
--address=127.0.0.1 \
--service-cluster-ip-range=10.254.0.0/16 \
--cluster-name=kubernetes \
--cluster-signing-cert-file=/k8s/kubernetes/ssl/ca.pem \
--cluster-signing-key-file=/k8s/kubernetes/ssl/ca-key.pem  \
--root-ca-file=/k8s/kubernetes/ssl/ca.pem \
--service-account-private-key-file=/k8s/kubernetes/ssl/ca-key.pem"

Create the kube-controller-manager systemd unit file

vim /usr/lib/systemd/system/kube-controller-manager.service
 
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
 
[Service]
EnvironmentFile=-/k8s/kubernetes/cfg/kube-controller-manager
ExecStart=/k8s/kubernetes/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
 
[Install]
WantedBy=multi-user.target

Start the service

systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl start kube-controller-manager
systemctl status kube-controller-manager

(screenshot: kube-controller-manager service status)

Verify the master services

Set the environment variable

[root@k8s-master bin]# vim /etc/profile
PATH=/k8s/kubernetes/bin:$PATH
[root@k8s-master bin]# source /etc/profile

Check the master component status

[root@k8s-master bin]# kubectl get cs,nodes

(screenshot: kubectl get cs,nodes output)

### Node Deployment

The kubernetes worker nodes run the following components:

  1. docker
  2. kubelet
  3. kube-proxy
  4. flannel

System environment:
CentOS Linux release 7.3.1611 (Core)

Install the Docker environment

The packages have already been downloaded and bundled locally

yum localinstall docker-common-1.12.6-71.git3e8e77d.el7.centos.x86_64.rpm -y
yum localinstall docker-client-1.12.6-71.git3e8e77d.el7.centos.x86_64.rpm -y
yum localinstall docker-1.12.6-71.git3e8e77d.el7.centos.x86_64.rpm -y

Start Docker

systemctl start docker && systemctl enable docker

Deploy the kubelet component

kubelet runs on every worker node: it receives requests from kube-apiserver, manages Pod containers, and executes interactive commands such as exec, run and logs. On startup, kubelet automatically registers the node's information with kube-apiserver, and its built-in cAdvisor collects and monitors the node's resource usage. For security, only the HTTPS port that accepts secure requests is opened; requests are authenticated and authorized, and unauthorized access (e.g. from apiserver or heapster) is rejected.

Install the binaries

tar zxvf kubernetes-node-linux-amd64.tar.gz
cd kubernetes/node/bin/
cp kube-proxy kubelet kubectl /k8s/kubernetes/bin/

Copy the relevant certificates to the node machines

[root@k8s-master ssl]# pwd
/k8s/kubernetes/ssl
[root@k8s-master ssl]# scp *.pem root@192.168.10.111:$(pwd) 
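Node2 will need the same certificates when it is set up later, so assuming the same /k8s/kubernetes/ssl directory already exists there, the copy can simply be repeated:

[root@k8s-master ssl]# scp *.pem root@192.168.10.112:$(pwd)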

(screenshot: copying the k8s certificates)

Create the kubelet bootstrap kubeconfig files via a script

vim /k8s/kubernetes/cfg/environment.sh
#!/bin/bash
# Create the kubelet bootstrapping kubeconfig
# BOOTSTRAP_TOKEN must match the token in /k8s/kubernetes/cfg/token.csv on the master
BOOTSTRAP_TOKEN=f2c50331f07be89278acdaf341ff1ecc
KUBE_APISERVER="https://192.168.10.110:6443"
# Set the cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=/k8s/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=bootstrap.kubeconfig
 
# Set the client authentication parameters
kubectl config set-credentials kubelet-bootstrap \
  --token=${BOOTSTRAP_TOKEN} \
  --kubeconfig=bootstrap.kubeconfig
 
# Set the context parameters
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kubelet-bootstrap \
  --kubeconfig=bootstrap.kubeconfig
 
# Set the default context
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
 
#----------------------
 
# Create the kube-proxy kubeconfig file
 
kubectl config set-cluster kubernetes \
  --certificate-authority=/k8s/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-proxy.kubeconfig
 
kubectl config set-credentials kube-proxy \
  --client-certificate=/k8s/kubernetes/ssl/kube-proxy.pem \
  --client-key=/k8s/kubernetes/ssl/kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig
 
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-proxy \
  --kubeconfig=kube-proxy.kubeconfig
 
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

Note that the environment variable also needs to be set here:

[root@k8s-node1 bin]# vim /etc/profile
PATH=/k8s/kubernetes/bin:$PATH
[root@k8s-node1 bin]# source /etc/profile

Run the script:

[root@k8s-node1 cfg]# cd /k8s/kubernetes/cfg/
[root@k8s-node1 cfg]# sh environment.sh 
[root@k8s-node1 cfg]# ls
bootstrap.kubeconfig   environment.sh  initial-setup-ks.cfg  kube-proxy.kubeconfig
[root@k8s-node1 cfg]# 

Create the kubelet parameter configuration template file

vim /k8s/kubernetes/cfg/kubelet.config
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 192.168.10.111
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS: ["10.254.0.10"]
clusterDomain: cluster.local.
failSwapOn: false
authentication:
  anonymous:
    enabled: true

Create the kubelet configuration file

vim /k8s/kubernetes/cfg/kubelet
 
KUBELET_OPTS="--logtostderr=true \
--v=4 \
--hostname-override=192.168.10.111 \
--kubeconfig=/k8s/kubernetes/cfg/kubelet.kubeconfig \
--bootstrap-kubeconfig=/k8s/kubernetes/cfg/bootstrap.kubeconfig \
--config=/k8s/kubernetes/cfg/kubelet.config \
--cert-dir=/k8s/kubernetes/ssl \
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"

Create the kubelet systemd unit file

vim /usr/lib/systemd/system/kubelet.service 
 
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service
 
[Service]
EnvironmentFile=/k8s/kubernetes/cfg/kubelet
ExecStart=/k8s/kubernetes/bin/kubelet $KUBELET_OPTS
Restart=on-failure
KillMode=process
 
[Install]
WantedBy=multi-user.target

Bind the kubelet-bootstrap user to the system cluster role (run this on the master; once it has succeeded it does not need to be run a second time)

kubectl create clusterrolebinding kubelet-bootstrap \
  --clusterrole=system:node-bootstrapper \
  --user=kubelet-bootstrap

(screenshot: clusterrolebinding created)

Start the service

systemctl daemon-reload 
systemctl enable kubelet
systemctl start kubelet
systemctl status kubelet

(screenshot: kubelet service status)

Repeat the same steps on the Node2 machine.

The master accepts the kubelet CSR requests. CSRs can be approved manually or automatically. The automatic approach is recommended, because starting with v1.8 the certificates generated after a CSR is approved can be rotated automatically (a sketch of the automatic bindings follows below). The manual approve workflow is shown here.
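A sketch of the automatic approach, assuming the bootstrap group system:kubelet-bootstrap from token.csv; Kubernetes ships the ClusterRoles system:certificates.k8s.io:certificatesigningrequests:nodeclient and ...:selfnodeclient for approving a node's first client CSR and its renewals:

# auto-approve the first client CSR from bootstrapping kubelets
kubectl create clusterrolebinding node-client-auto-approve-csr \
  --clusterrole=system:certificates.k8s.io:certificatesigningrequests:nodeclient \
  --group=system:kubelet-bootstrap
# auto-approve client certificate renewals for registered nodes
kubectl create clusterrolebinding node-client-auto-renew-crt \
  --clusterrole=system:certificates.k8s.io:certificatesigningrequests:selfnodeclient \
  --group=system:nodes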

View the CSR list (run on the master)

[root@k8s-master ~]# kubectl get csr

(screenshot: pending CSR list)

Approve the nodes (run on the master)

Command: kubectl certificate approve <csr-name>

kubectl certificate approve node-csr-C4jGZdaV-gvAk_bxT35e_TlME50zTr5sQGZF0fkSZ7Q
kubectl certificate approve node-csr-gfrNCW9dKRkT12GAxSnwDXaWRuq5oJG5Q5g08JvOz8U

Check the CSRs again:

(screenshot: CSR list after approval)

Deploy the kube-proxy component

kube-proxy runs on all node machines. It watches the apiserver for changes to services and endpoints and creates routing rules to load-balance the services. 1) Create the kube-proxy configuration file

vim /k8s/kubernetes/cfg/kube-proxy
KUBE_PROXY_OPTS="--logtostderr=true \
--v=4 \
--hostname-override=192.168.10.111 \
--cluster-cidr=10.254.0.0/16 \
--kubeconfig=/k8s/kubernetes/cfg/kube-proxy.kubeconfig"

Create the kube-proxy systemd unit file

vim /usr/lib/systemd/system/kube-proxy.service 
 
[Unit]
Description=Kubernetes Proxy
After=network.target
 
[Service]
EnvironmentFile=-/k8s/kubernetes/cfg/kube-proxy
ExecStart=/k8s/kubernetes/bin/kube-proxy $KUBE_PROXY_OPTS
Restart=on-failure
 
[Install]
WantedBy=multi-user.target

Start the service

systemctl daemon-reload 
systemctl enable kube-proxy 
systemctl start kube-proxy
systemctl status kube-proxy

(screenshot: kube-proxy service status)

Check the cluster status

Note: if kubelet or kube-proxy were misconfigured during this stage (for example a wrong listen IP or hostname leading to a "node not found" error), delete the kubelet-client certificates, restart the kubelet service, and approve the new CSR again; see the sketch below.
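A rough sketch of that recovery, assuming the --cert-dir=/k8s/kubernetes/ssl configured above (the CSR name is a placeholder):

# on the node: remove the client certificates issued during bootstrap, then restart
rm -f /k8s/kubernetes/ssl/kubelet-client*
systemctl restart kubelet
# on the master: the node submits a fresh CSR that has to be approved again
kubectl get csr
kubectl certificate approve <new-csr-name>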

(screenshot: kubectl get nodes output)

### Flanneld Network Deployment

By default there is no flanneld network, so pods on different Node machines cannot communicate; only pods on the same Node can. To keep the deployment steps clear and simple, flanneld is installed last. The flannel service has to start before docker. When the flannel service starts it mainly does the following: fetches the network configuration from etcd; allocates a subnet and registers it in etcd; records the subnet information in /run/flannel/subnet.env.

Register the network segment in etcd

[root@k8s-node1 k8s]# /k8s/etcd/bin/etcdctl --ca-file=/k8s/etcd/ssl/ca.pem --cert-file=/k8s/etcd/ssl/server.pem --key-file=/k8s/etcd/ssl/server-key.pem --endpoints="https://192.168.10.110:2379,https://192.168.10.111:2379,https://192.168.10.112:2379"  set /k8s/network/config  '{ "Network": "10.254.0.0/16", "Backend": {"Type": "vxlan"}}'
# returns
{ "Network": "10.254.0.0/16", "Backend": {"Type": "vxlan"}}

The current flanneld version (v0.10.0) does not support etcd v3, so the configuration key and network segment are written with the etcd v2 API. The Pod network segment ${CLUSTER_CIDR} written here must be a /16 range and must match the --cluster-cidr parameter of kube-controller-manager.

Install flannel

[root@k8s-node1 bin]# tar zxvf flannel-v0.10.0-linux-amd64.tar.gz 
[root@k8s-node1 bin]# mv flanneld mk-docker-opts.sh /k8s/kubernetes/bin/

Configure flanneld

[root@k8s-node1 ~]# vim /k8s/kubernetes/cfg/flanneld

FLANNEL_OPTIONS="--etcd-endpoints=https://192.168.10.110:2379,https://192.168.10.111:2379,https://192.168.10.112:2379 -etcd-cafile=/k8s/etcd/ssl/ca.pem -etcd-certfile=/k8s/etcd/ssl/server.pem -etcd-keyfile=/k8s/etcd/ssl/server-key.pem -etcd-prefix=/k8s/network"

Create the flanneld systemd unit file

vim /usr/lib/systemd/system/flanneld.service

[Unit]
Description=Flanneld overlay address etcd agent
After=network-online.target network.target
Before=docker.service
 
[Service]
Type=notify
EnvironmentFile=/k8s/kubernetes/cfg/flanneld
ExecStart=/k8s/kubernetes/bin/flanneld --ip-masq $FLANNEL_OPTIONS
ExecStartPost=/k8s/kubernetes/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
Restart=on-failure
 
[Install]
WantedBy=multi-user.target

*Note:
The mk-docker-opts.sh script writes the Pod subnet segment assigned to flanneld into /run/flannel/subnet.env (the -d option above); when docker starts later, it uses the environment variables in that file to configure the docker0 bridge. flanneld uses the interface of the system's default route to communicate with other nodes; on nodes with multiple network interfaces (e.g. internal and public), the -iface parameter can specify the interface to use. flanneld needs to run with root privileges.*

Configure Docker

Configure Docker to start with the assigned subnet

vim /usr/lib/systemd/system/docker.service
# add to the [Service] section:
EnvironmentFile=-/run/flannel/subnet.env
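For the flannel subnet to actually take effect, the existing ExecStart line in docker.service also has to pick up the variable written by mk-docker-opts.sh (-k DOCKER_NETWORK_OPTIONS above). A sketch of the relevant [Service] lines, keeping whatever daemon binary and flags the unit already contains:

[Service]
EnvironmentFile=-/run/flannel/subnet.env
# append $DOCKER_NETWORK_OPTIONS to the existing ExecStart, for example:
ExecStart=/usr/bin/dockerd-current $DOCKER_NETWORK_OPTIONS <existing flags...>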

Start the services

[root@k8s-node1 ~]# systemctl daemon-reload
[root@k8s-node1 ~]# systemctl stop docker
[root@k8s-node1 ~]# systemctl start flanneld
[root@k8s-node1 ~]# systemctl enable flanneld
[root@k8s-node1 ~]# systemctl start docker
[root@k8s-node1 ~]# systemctl restart kubelet
[root@k8s-node1 ~]# systemctl restart kube-proxy

Note: stop docker and the related kubelet before starting flannel, so that flannel can take over the docker0 bridge.

[root@k8s-node1 ~]# cat /run/flannel/subnet.env 
[root@k8s-node1 ~]# ls /k8s/
[root@k8s-node1 ~]# ip a

(screenshot: output of the checks above)

After this, node2 needs to be deployed in the same way.

Query the nodes

Run on the master:

[root@k8s-master ~]# kubectl get nodes
NAME             STATUS   ROLES    AGE   VERSION
192.168.10.111   Ready    <none>   50m   v1.12.0-rc.2
192.168.10.112   Ready    <none>   43m   v1.12.0-rc.2