CentOS 8.2: Binary Installation of a Single-Master Kubernetes 1.18.5 Cluster

1. Environment Preparation

Single-master architecture diagram

(architecture diagram omitted)

1.1 Lab environment

Role      IP address   Hostname      Docker version   Hardware   OS          Kernel                  Installed components
master1   10.0.0.30    k8s-master1   19.03.4          2c4g       CentOS8.2   4.18.0-193.el8.x86_64   kube-apiserver,kube-controller-manager,kube-scheduler,etcd
node1     10.0.0.33    k8s-node1     19.03.4          2c4g       CentOS8.2   4.18.0-193.el8.x86_64   kubelet,kube-proxy,docker,etcd
node2     10.0.0.34    k8s-node2     19.03.4          2c4g       CentOS8.2   4.18.0-193.el8.x86_64   kubelet,kube-proxy,docker,etcd

CentOS 8.2 was installed with the minimal profile, and the following script was then run on every machine

#!/usr/bin/env bash
#

#Switch the system yum repos to the Aliyun mirror and add the EPEL repo
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.bak
[ ! -e /etc/yum.repos.d/CentOS-Base.repo ] && curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-8.repo

dnf clean all
dnf makecache

yum install -y https://mirrors.aliyun.com/epel/epel-release-latest-8.noarch.rpm

sed -i 's|^#baseurl=https://download.fedoraproject.org/pub|baseurl=https://mirrors.aliyun.com|' /etc/yum.repos.d/epel*
sed -i 's|^metalink|#metalink|' /etc/yum.repos.d/epel*


dnf -y install tar wget net-tools git vim tree lrzsz htop iftop iotop psmisc python36 python3-devel zlib zlib-devel gcc gcc-c++ conntrack-tools jq socat bash-completion telnet nload strace tcpdump lsof sysstat

#Disable the firewall, SELinux, and NetworkManager
systemctl disable firewalld NetworkManager
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config

#Time-sync cron job via ntpdate (alternative approach, left commented out; chrony is configured in 1.8 instead)
#rpm -ivh http://mirrors.wlnmp.com/centos/wlnmp-release-centos.noarch.rpm && dnf -y install wntp

#sed -i '/*\/10 \* \* \* \* \/usr\/sbin\/ntpdate ntp2\.aliyun\.com &>\/dev\/null/d' /var/spool/cron/root
#echo "*/10 * * * * /usr/local/bin/ntpdate ntp2.aliyun.com &>/dev/null" >>/var/spool/cron/root

#Show timestamps in shell history
sed -i '/HISTFILESIZE=2000/d' /etc/bashrc
sed -i '/HISTSIZE=2000/d' /etc/bashrc
sed -i '/HISTTIMEFORMAT="%Y-%m-%d %H:%M:%S "/d' /etc/bashrc
sed -i '/export HISTTIMEFORMAT/d' /etc/bashrc
cat >>/etc/bashrc<<'EOF'
HISTFILESIZE=2000
HISTSIZE=2000
HISTTIMEFORMAT="%Y-%m-%d %H:%M:%S "
export HISTTIMEFORMAT
EOF
source /etc/bashrc

#Raise the system max open file descriptors
sed -i '/root soft nofile 65535/d' /etc/security/limits.conf
sed -i '/root hard nofile 65535/d' /etc/security/limits.conf
sed -i '/* soft nofile 65535/d' /etc/security/limits.conf
sed -i '/* hard nofile 65535/d' /etc/security/limits.conf
cat >>/etc/security/limits.conf<<'EOF'
root soft nofile 65535
root hard nofile 65535
* soft nofile 65535
* hard nofile 65535
EOF

#Point pip at a domestic (Tsinghua) mirror
mkdir ~/.pip
cat >~/.pip/pip.conf<<EOF
[global]
index-url = https://pypi.tuna.tsinghua.edu.cn/simple/
EOF

pip3 install glances mycli

#I habitually run netstat -ntpl, so create an alias for it
sed -i.bak "8c alias n='netstat -ntpl'" ~/.bashrc && source ~/.bashrc


#CentOS 8 uses NetworkManager to manage the network, so re-enable it here
systemctl enable NetworkManager

reboot

1.2 Configure passwordless SSH from the master node to the nodes

Generate a key pair

ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa &>/dev/null

Create an expect script to automate the interaction

Here the machines' username is root, and the password is the internationally standard universal password: 1

cat >ssh.exp <<'EOF'
#!/usr/bin/expect
if { $argc !=2 } {
  send_user "usage: expect ssh.exp file ip\n"
  exit
}

set file [lindex $argv 0]
set ip [lindex $argv 1]
set password "1"

spawn ssh-copy-id -i $file -p 22 root@$ip
expect {
  "yes/no" {send "yes\r";exp_continue}
  "*password" {send "$password\r"}
}
expect eof
EOF

Write a shell script that runs the expect script in a loop

#Create the script
cat > ssh.sh <<'EOF'
#!/bin/bash
for i in 30 33 34
do
  expect ~/ssh.exp ~/.ssh/id_rsa.pub 10.0.0.$i
done
EOF

#Install expect
dnf -y install expect

#Run the script
sh ssh.sh

1.3 Create an environment-variable script

mkdir -p /opt/k8s/script
cat >/opt/k8s/script/env.sh <<EOF
export NODE_IPS=(10.0.0.30 10.0.0.33 10.0.0.34)
export ETCD_NAMES=(etcd-1 etcd-2 etcd-3)
export NODE_NAMES=(k8s-master1 k8s-node1 k8s-node2)
EOF

1.4 Configure /etc/hosts on every node

source /opt/k8s/script/env.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh root@${node_ip} 'cat >> /etc/hosts << EOF
10.0.0.30 k8s-master1
10.0.0.31 k8s-master2
10.0.0.32 k8s-master3
10.0.0.33 k8s-node1
10.0.0.34 k8s-node2
10.0.0.35 k8s-node3
EOF'
  done

1.5 Disable the firewall and SELinux

source /opt/k8s/script/env.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh root@${node_ip} 'systemctl stop firewalld && systemctl disable firewalld && setenforce 0'
  done  

#Permanent change; takes effect after a reboot (the init script in 1.1 already applied this on every node)
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config

1.6 Disable swap

⚠️ On CentOS 7 the fstab entry is /dev/mapper/centos-swap swap swap defaults 0 0

⚠️ On CentOS 8 it is /dev/mapper/cl-swap swap swap defaults 0 0

source /opt/k8s/script/env.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh root@${node_ip} "swapoff -a && sed -i 's/^\/dev\/mapper\/cl-swap/#&/' /etc/fstab"
  done

1.7 Pass bridged IPv4 traffic to iptables chains

source /opt/k8s/script/env.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh root@${node_ip} 'cat >/etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
modprobe br_netfilter && sysctl -p /etc/sysctl.d/k8s.conf'
  done

1.8 Configure time synchronization

Run the following from the master node

source /opt/k8s/script/env.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh root@${node_ip} dnf -y install chrony
  done


#On the master, change the NTP server to Aliyun and allow clients from 10.0.0.0/24
sed -i -e '/^pool/cserver ntp1.aliyun.com iburst' -e '/^#allow/callow 10.0.0.0/24' /etc/chrony.conf


#On the nodes, change the sync server to the master node
export NODE_IPS=(10.0.0.33 10.0.0.34)
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh root@${node_ip} sed -i '/^pool/cserver\ k8s-master1\ iburst' /etc/chrony.conf
  done


#Start chronyd and enable it at boot
source /opt/k8s/script/env.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh root@${node_ip} 'systemctl enable chronyd && systemctl start chronyd'
  done


#Check the ports; chronyd listens on UDP port 323
source /opt/k8s/script/env.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh root@${node_ip} netstat -nupl|grep chronyd
  done

#Expected output
>>> 10.0.0.30
udp        0      0 127.0.0.1:323           0.0.0.0:*                           4535/chronyd        
udp6       0      0 ::1:323                 :::*                                4535/chronyd        
>>> 10.0.0.33
udp        0      0 127.0.0.1:323           0.0.0.0:*                           5984/chronyd        
udp6       0      0 ::1:323                 :::*                                5984/chronyd        
>>> 10.0.0.34
udp        0      0 127.0.0.1:323           0.0.0.0:*                           5674/chronyd        
udp6       0      0 ::1:323                 :::*                                5674/chronyd  


#Verify the sync sources
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh root@${node_ip} chronyc sources
  done

#Expected output
>>> 10.0.0.30
210 Number of sources = 1
MS Name/IP address         Stratum Poll Reach LastRx Last sample               
===============================================================================
^* 120.25.115.20                 2   6    17     7  +5664us[+8204us] +/-   34ms
>>> 10.0.0.33
210 Number of sources = 1
MS Name/IP address         Stratum Poll Reach LastRx Last sample               
===============================================================================
^* k8s-master1                   3   6    17     3   -632us[  +70us] +/- 4645ms
>>> 10.0.0.34
210 Number of sources = 1
MS Name/IP address         Stratum Poll Reach LastRx Last sample               
===============================================================================
^* k8s-master1                   3   6    17     2    -33us[  -93us] +/- 4940ms

2. Create the CA Root Certificate and Key

About the CA root certificate

  • For security, the Kubernetes components use x509 certificates to encrypt and authenticate their communication.

  • The CA (Certificate Authority) certificate is self-signed and is used to sign every other certificate created later.

  • The CA certificate is shared by all cluster nodes; it only needs to be created once and then signs all other certificates.

  • This document uses CloudFlare's PKI toolkit, cfssl, to create all certificates.

2.1 Create a directory for the CA root certificate

mkdir -p /opt/k8s/cert && cd /opt/k8s/cert

2.2 Download and set up the cfssl toolkit

cfssl website

cfssl GitHub repository

cfssl is an open-source certificate management tool that generates certificates from JSON files; it is more convenient to use than openssl.

#Download the cfssl binaries
wget https://github.com/cloudflare/cfssl/releases/download/v1.4.1/cfssl-certinfo_1.4.1_linux_amd64
wget https://github.com/cloudflare/cfssl/releases/download/v1.4.1/cfssljson_1.4.1_linux_amd64
wget https://github.com/cloudflare/cfssl/releases/download/v1.4.1/cfssl_1.4.1_linux_amd64

#Make them executable
chmod +x cfssl*

#Rename and move them to /usr/local/bin
mv cfssl_1.4.1_linux_amd64 /usr/local/bin/cfssl
mv cfssljson_1.4.1_linux_amd64 /usr/local/bin/cfssljson
mv cfssl-certinfo_1.4.1_linux_amd64 /usr/local/bin/cfssl-certinfo

2.3 Create the CA config file

cd /opt/k8s/cert
cat > ca-config.json << EOF
{
  "signing": {
    "default": {
      "expiry": "876000h"
    },
    "profiles": {
      "kubernetes": {
         "expiry": "876000h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF

Notes on the parameters in this file

  • signing: the certificate can be used to sign other certificates (the generated ca.pem will contain CA=TRUE);
  • server auth: a client may use this certificate to verify certificates presented by servers;
  • client auth: a server may use this certificate to verify certificates presented by clients;
  • "expiry": "876000h": the certificates are valid for 100 years;

2.4 Create the certificate signing request (CSR) file

cd /opt/k8s/cert
cat > ca-csr.json <<EOF
{
  "CN": "kubernetes-ca",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing"
    }
  ],
  "ca": {
    "expiry": "876000h"
 }
}
EOF

Notes on the parameters in this file

  • CN (Common Name): kube-apiserver extracts this field from a certificate as the requesting User Name; browsers use it to check whether a site is legitimate;
  • O (Organization): kube-apiserver extracts this field from a certificate as the Group the requesting user belongs to;
  • kube-apiserver uses the extracted User and Group as the identity for RBAC authorization;

⚠️ Note:

  • The CN, C, ST, L, O, OU combination in different certificates' CSR files must differ, otherwise you may hit the error PEER'S CERTIFICATE HAS AN INVALID SIGNATURE;

  • In the CSR files created later, the CN always differs (while C, ST, L, O, OU stay the same) so the certificates can be told apart;

2.5 Generate the CA certificate and private key

cd /opt/k8s/cert
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
ls ca*pem

#On success the command produces the following 3 files
ca.csr ca-key.pem  ca.pem
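
A quick optional check (a sketch, assuming openssl is available) that the root certificate really carries CA=TRUE and the 100-year validity configured above:

cd /opt/k8s/cert
#Print the subject and validity window; notAfter should be roughly 100 years out
openssl x509 -in ca.pem -noout -subject -dates
#Confirm the CA flag; expect CA:TRUE under Basic Constraints
openssl x509 -in ca.pem -noout -text | grep -A1 'Basic Constraints'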

3. Deploy the etcd Cluster

etcd is a Raft-based distributed key-value store developed by CoreOS, commonly used for service discovery, shared configuration, and concurrency control (leader election, distributed locks, and so on).

Kubernetes uses the etcd cluster to persist all API objects and runtime data.

This document walks through deploying a three-node highly available etcd cluster:

  • download and distribute the etcd binaries;
  • create x509 certificates for the etcd cluster nodes, used to encrypt traffic between clients (such as etcdctl) and the cluster, and between cluster members;
  • create the etcd systemd unit file and configure the service parameters;
  • check that the cluster is working;

The etcd cluster node names and IPs are as follows:

  • k8s-master1:10.0.0.30
  • k8s-node1:10.0.0.33
  • k8s-node2:10.0.0.34

3.1 Create the etcd certificate and private key

3.1.1 Create the certificate signing request

  • hosts: the list of etcd node IPs authorized to use this certificate; every etcd cluster node IP must be listed
  • ⚠️ The IPs in this file are the etcd node IPs; list the IP of every node that runs etcd. For easier expansion later you can also list a few spare IPs
cd /opt/k8s/cert
cat > etcd-csr.json <<EOF
{
  "CN": "etcd",
  "hosts": [
    "10.0.0.30",
    "10.0.0.33",
    "10.0.0.34"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing"
    }
  ]
}
EOF

3.1.2 Generate the certificate and private key

cd /opt/k8s/cert
cfssl gencert -ca=ca.pem \
    -ca-key=ca-key.pem \
    -config=ca-config.json \
    -profile=kubernetes etcd-csr.json | cfssljson -bare etcd
ls etcd*pem    

#On success the command produces the following files
etcd.csr  etcd-key.pem  etcd.pem
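
Optionally (again assuming openssl is available), confirm that all three node IPs made it into the certificate's Subject Alternative Name list:

#Expect IP Address:10.0.0.30, 10.0.0.33, 10.0.0.34 in the output
openssl x509 -in etcd.pem -noout -text | grep -A1 'Subject Alternative Name'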

3.2 Download the etcd binaries

etcd GitHub repository

etcd website

Create a directory

mkdir /opt/k8s/etcd && cd /opt/k8s/etcd

Download the binary package

wget https://github.com/etcd-io/etcd/releases/download/v3.4.9/etcd-v3.4.9-linux-amd64.tar.gz
tar xf etcd-v3.4.9-linux-amd64.tar.gz

Copy the etcd binaries to /usr/local/bin

cp etcd-v3.4.9-linux-amd64/{etcd,etcdctl} /usr/local/bin

3.3 Create the etcd config file

Create a template first; sed will substitute the per-node values later

mkdir /opt/k8s/etcd/cfg && cd /opt/k8s/etcd/cfg
cat > etcd.conf.template << EOF
#[Member]
ETCD_NAME="##ETCD_NAME##"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://##NODE_IP##:2380"
ETCD_LISTEN_CLIENT_URLS="https://##NODE_IP##:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://##NODE_IP##:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://##NODE_IP##:2379"
ETCD_INITIAL_CLUSTER="etcd-1=https://10.0.0.30:2380,etcd-2=https://10.0.0.33:2380,etcd-3=https://10.0.0.34:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF

etcd config parameter reference

Parameter                          Description
ETCD_NAME                          Node name, unique within the cluster
ETCD_DATA_DIR                      Data directory
ETCD_LISTEN_PEER_URLS              Listen address for cluster (peer) communication
ETCD_LISTEN_CLIENT_URLS            Listen address for client access
ETCD_INITIAL_ADVERTISE_PEER_URLS   Peer address advertised to the cluster
ETCD_ADVERTISE_CLIENT_URLS         Client address advertised to the cluster
ETCD_INITIAL_CLUSTER               Addresses of all cluster nodes
ETCD_INITIAL_CLUSTER_TOKEN         Cluster token
ETCD_INITIAL_CLUSTER_STATE         State when joining the cluster: new for a new cluster, existing to join an existing one

3.4 Manage etcd with systemd

cat > /usr/lib/systemd/system/etcd.service <<EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
EnvironmentFile=/opt/k8s/etcd/cfg/etcd.conf
ExecStart=/usr/local/bin/etcd \
--cert-file=/opt/k8s/cert/etcd.pem \
--key-file=/opt/k8s/cert/etcd-key.pem \
--peer-cert-file=/opt/k8s/cert/etcd.pem \
--peer-key-file=/opt/k8s/cert/etcd-key.pem \
--trusted-ca-file=/opt/k8s/cert/ca.pem \
--peer-trusted-ca-file=/opt/k8s/cert/ca.pem \
--logger=zap
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF

3.5 Copy the files to the remaining nodes

Copy the certificates

export NODE_IPS=(10.0.0.33 10.0.0.34)
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh root@${node_ip} mkdir -p /opt/k8s/cert
    scp -p /opt/k8s/cert/{etcd-key.pem,etcd.pem,ca-key.pem,ca.pem} root@${node_ip}:/opt/k8s/cert
  done

Copy the systemd unit file

export NODE_IPS=(10.0.0.33 10.0.0.34)
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    scp -r /usr/lib/systemd/system/etcd.service root@${node_ip}:/usr/lib/systemd/system/
  done

Copy the etcd config files

#First run the sed substitution, replacing NODE_IP and ETCD_NAME in the template with each node's values
source /opt/k8s/script/env.sh
cd /opt/k8s/etcd/cfg
for (( i=0; i < 3; i++ ))
  do
    sed -e "s/##NODE_IP##/${NODE_IPS[i]}/" -e "s/##ETCD_NAME##/${ETCD_NAMES[i]}/" etcd.conf.template > etcd-${NODE_IPS[i]}.conf
  done


#Copy the etcd config files
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh root@${node_ip} mkdir -p /opt/k8s/etcd/cfg
    scp etcd-${node_ip}.conf root@${node_ip}:/opt/k8s/etcd/cfg/etcd.conf
  done

Copy the etcd binaries

export NODE_IPS=(10.0.0.33 10.0.0.34)
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    scp -p /usr/local/bin/{etcd,etcdctl} root@${node_ip}:/usr/local/bin
  done

3.6 Start etcd

source /opt/k8s/script/env.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh root@${node_ip} "systemctl daemon-reload && systemctl enable etcd && systemctl restart etcd " &
  done

Check the startup result and make sure the state is active (running); otherwise inspect the logs with journalctl -u etcd to find the cause

source /opt/k8s/script/env.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh root@${node_ip} "systemctl status etcd|grep Active"
  done

Expected output

>>> 10.0.0.30
   Active: active (running) since Tue 2020-07-07 00:39:54 CST; 1min 5s ago
>>> 10.0.0.33
   Active: active (running) since Tue 2020-07-07 00:39:54 CST; 1min 5s ago
>>> 10.0.0.34
   Active: active (running) since Tue 2020-07-07 00:39:54 CST; 1min 5s ago

3.7 Verify service health

After the etcd cluster is deployed, run the following command on any etcd node

source /opt/k8s/script/env.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    /usr/local/bin/etcdctl \
    --endpoints=https://${node_ip}:2379 \
    --cacert=/opt/k8s/cert/ca.pem \
    --cert=/opt/k8s/cert/etcd.pem \
    --key=/opt/k8s/cert/etcd-key.pem endpoint health
  done

Expected output

>>> 10.0.0.30
https://10.0.0.30:2379 is healthy: successfully committed proposal: took = 7.228637ms
>>> 10.0.0.33
https://10.0.0.33:2379 is healthy: successfully committed proposal: took = 7.620834ms
>>> 10.0.0.34
https://10.0.0.34:2379 is healthy: successfully committed proposal: took = 6.689841ms

3.8 Check the current etcd cluster leader

export ETCD_ENDPOINTS="https://10.0.0.30:2379,https://10.0.0.33:2379,https://10.0.0.34:2379"
etcdctl \
  -w table --cacert=/opt/k8s/cert/ca.pem \
  --cert=/opt/k8s/cert/etcd.pem \
  --key=/opt/k8s/cert/etcd-key.pem \
  --endpoints=${ETCD_ENDPOINTS} endpoint status

Output; as shown, the current etcd leader is 10.0.0.30

+------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
|        ENDPOINT        |        ID        | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
+------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| https://10.0.0.30:2379 | dd4b95995dc266b1 |   3.4.9 |   16 kB |      true |      false |         2 |          8 |                  8 |        |
| https://10.0.0.33:2379 | f1ec1f6015c9d4a4 |   3.4.9 |   20 kB |     false |      false |         2 |          8 |                  8 |        |
| https://10.0.0.34:2379 | 22353e8ece256e71 |   3.4.9 |   20 kB |     false |      false |         2 |          8 |                  8 |        |
+------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
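
As a final sanity check (an optional sketch; the key name foo is arbitrary), write a key and read it back through the cluster endpoints to confirm replication works:

export ETCD_ENDPOINTS="https://10.0.0.30:2379,https://10.0.0.33:2379,https://10.0.0.34:2379"
#Write a test key
etcdctl --cacert=/opt/k8s/cert/ca.pem --cert=/opt/k8s/cert/etcd.pem \
  --key=/opt/k8s/cert/etcd-key.pem --endpoints=${ETCD_ENDPOINTS} put foo bar
#Read it back, then delete it
etcdctl --cacert=/opt/k8s/cert/ca.pem --cert=/opt/k8s/cert/etcd.pem \
  --key=/opt/k8s/cert/etcd-key.pem --endpoints=${ETCD_ENDPOINTS} get foo
etcdctl --cacert=/opt/k8s/cert/ca.pem --cert=/opt/k8s/cert/etcd.pem \
  --key=/opt/k8s/cert/etcd-key.pem --endpoints=${ETCD_ENDPOINTS} del foo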

4. Install Docker

Docker website

docker-ce GitHub repository

Docker official binary downloads

Perform these steps on all nodes

4.1 Download and extract the binary package

Create a directory

mkdir /opt/k8s/docker && cd /opt/k8s/docker

Download and extract the package

wget https://download.docker.com/linux/static/stable/x86_64/docker-19.03.12.tgz

tar xf docker-19.03.12.tgz

4.2 Copy the Docker binaries to all nodes

cd /opt/k8s/docker
source /opt/k8s/script/env.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    scp -p docker/* root@${node_ip}:/usr/local/bin
  done

4.3 Manage Docker with systemd

cat > /usr/lib/systemd/system/docker.service <<'EOF'
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service
Wants=network-online.target
[Service]
Type=notify
ExecStart=/usr/local/bin/dockerd
ExecReload=/bin/kill -s HUP $MAINPID
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
Delegate=yes
KillMode=process
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target
EOF

Copy the Docker systemd unit file to the node(s)

export NODE_IPS=(10.0.0.33 10.0.0.34)
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    scp -p /usr/lib/systemd/system/docker.service root@${node_ip}:/usr/lib/systemd/system/docker.service
  done

4.4 Create the Docker daemon config file

On every machine, configure a registry mirror and set Docker's cgroup driver to systemd, which is the official recommendation

mkdir /etc/docker
cat > /etc/docker/daemon.json<<'EOF'
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "registry-mirrors": [
      "https://gqk8w9va.mirror.aliyuncs.com"
  ],
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m",
    "max-file": "3"
  }
}
EOF

Copy the Docker config file to the node(s)

export NODE_IPS=(10.0.0.33 10.0.0.34)
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh root@${node_ip} mkdir /etc/docker
    scp -p /etc/docker/daemon.json root@${node_ip}:/etc/docker/daemon.json
  done

4.5 Start Docker and enable it at boot

source /opt/k8s/script/env.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh root@${node_ip} "systemctl daemon-reload && systemctl enable docker && systemctl start docker "
  done

4.6 Check Docker status

Make sure Docker is in the running state on every node

source /opt/k8s/script/env.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh root@${node_ip} "systemctl status docker|grep Active"
  done

Expected output

>>> 10.0.0.30
   Active: active (running) since Tue 2020-07-07 09:07:57 CST; 21s ago
>>> 10.0.0.33
   Active: active (running) since Tue 2020-07-07 09:07:58 CST; 20s ago
>>> 10.0.0.34
   Active: active (running) since Tue 2020-07-07 09:07:59 CST; 20s ago
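
Since daemon.json sets the cgroup driver to systemd, it is worth confirming the setting took effect (an optional check; the docker info output format can vary slightly between versions):

source /opt/k8s/script/env.sh
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh root@${node_ip} "docker info 2>/dev/null | grep -i 'cgroup driver'"
  done

#Expected on every node
 Cgroup Driver: systemd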

4.7 Set up Docker command completion

A yum-installed Docker ships /usr/share/bash-completion/completions/docker, the file that provides bash completion for the docker command; a binary install does not include it, so simply copy that file into place, as sketched below
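
A minimal sketch of doing that (assuming outbound access to GitHub; the completion script lives in the docker/cli repository):

#Fetch the completion script once, then distribute it to every node
source /opt/k8s/script/env.sh
wget -O /usr/share/bash-completion/completions/docker \
  https://raw.githubusercontent.com/docker/cli/master/contrib/completion/bash/docker
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    scp -p /usr/share/bash-completion/completions/docker root@${node_ip}:/usr/share/bash-completion/completions/
  done
#Completion takes effect in new shells (or source the file directly)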

5. Deploy the Master Node

The Kubernetes master node runs the following components:

  • kube-apiserver
  • kube-scheduler
  • kube-controller-manager

5.1 Deploy kube-apiserver

5.1.1 Generate the kube-apiserver certificate and private key

Create the certificate signing request

cd /opt/k8s/cert
cat > kube-apiserver-csr.json <<EOF
{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "10.0.0.30",
    "10.0.0.31",
    "10.0.0.32",
    "10.0.0.33",
    "10.0.0.34",
    "10.0.0.35",
    "10.244.1.1",
    "172.16.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

Generate the certificate and private key

cd /opt/k8s/cert
cfssl gencert -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -profile=kubernetes kube-apiserver-csr.json | cfssljson -bare kube-apiserver
ls kube-api*pem  


#On success the command produces the following files
kube-apiserver.csr  kube-apiserver-key.pem  kube-apiserver.pem

5.1.2 Download the kubernetes-server binary package

mkdir /opt/k8s/kubernetes-server && cd /opt/k8s/kubernetes-server
wget https://dl.k8s.io/v1.18.5/kubernetes-server-linux-amd64.tar.gz
tar xf kubernetes-server-linux-amd64.tar.gz

Copy the binaries to /usr/local/bin

cp kubernetes/server/bin/{apiextensions-apiserver,kubeadm,kube-apiserver,kube-controller-manager,kubectl,kubelet,kube-proxy,kube-scheduler,mounter} /usr/local/bin/

5.1.3 Create the kube-apiserver config file

mkdir /opt/k8s/{cfg,logs}
cat > /opt/k8s/cfg/kube-apiserver.conf << EOF
KUBE_APISERVER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/k8s/logs \\
--etcd-servers=https://10.0.0.30:2379,https://10.0.0.33:2379,https://10.0.0.34:2379 \\
--bind-address=10.0.0.30 \\
--secure-port=6443 \\
--advertise-address=10.0.0.30 \\
--allow-privileged=true \\
--service-cluster-ip-range=172.16.0.0/24 \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \\
--authorization-mode=RBAC,Node \\
--enable-bootstrap-token-auth=true \\
--token-auth-file=/opt/k8s/cfg/token.csv \\
--service-node-port-range=30000-32767 \\
--kubelet-client-certificate=/opt/k8s/cert/kube-apiserver.pem \\
--kubelet-client-key=/opt/k8s/cert/kube-apiserver-key.pem \\
--tls-cert-file=/opt/k8s/cert/kube-apiserver.pem  \\
--tls-private-key-file=/opt/k8s/cert/kube-apiserver-key.pem \\
--client-ca-file=/opt/k8s/cert/ca.pem \\
--service-account-key-file=/opt/k8s/cert/ca-key.pem \\
--etcd-cafile=/opt/k8s/cert/ca.pem \\
--etcd-certfile=/opt/k8s/cert/etcd.pem \\
--etcd-keyfile=/opt/k8s/cert/etcd-key.pem \\
--audit-log-maxage=30 \\
--audit-log-maxbackup=3 \\
--audit-log-maxsize=100 \\
--audit-log-path=/opt/k8s/logs/k8s-audit.log"
EOF
  • --logtostderr: whether to log to stderr (false here, so logs go to files under --log-dir)
  • --v: log level
  • --log-dir: log directory
  • --etcd-servers: etcd cluster endpoints
  • --bind-address: listen address
  • --secure-port: HTTPS secure port
  • --advertise-address: address advertised to the cluster
  • --allow-privileged: allow privileged containers
  • --service-cluster-ip-range: virtual IP range for Services
  • --enable-admission-plugins: admission control plugins
  • --authorization-mode: authorization modes; enables RBAC authorization and Node self-management
  • --enable-bootstrap-token-auth: enable the TLS bootstrap mechanism
  • --token-auth-file: bootstrap token file
  • --service-node-port-range: port range for NodePort Services
  • --kubelet-client-xxx: client certificate the apiserver uses to access kubelets
  • --tls-xxx-file: apiserver HTTPS certificates
  • --etcd-xxxfile: certificates for connecting to the etcd cluster
  • --audit-log-xxx: audit log settings

5.1.4 Enable the TLS Bootstrapping mechanism

TLS Bootstrapping:

  • Once the master apiserver enables TLS authentication, the kubelet and kube-proxy on every node must present a valid CA-signed certificate to communicate with kube-apiserver. With many nodes, issuing these client certificates by hand is a lot of work and complicates scaling the cluster. To simplify this, Kubernetes introduced the TLS bootstrapping mechanism to issue client certificates automatically: the kubelet requests a certificate from the apiserver as a low-privilege user, and the apiserver signs the kubelet's certificate dynamically. This approach is strongly recommended on nodes; it is currently used mainly for the kubelet, while kube-proxy still uses a single certificate that we issue ourselves.

TLS bootstrapping workflow:

(workflow diagram omitted)

Create the token file referenced by --token-auth-file=/opt/k8s/cfg/token.csv in the kube-apiserver config /opt/k8s/cfg/kube-apiserver.conf

  • Format: token,username,UID,group
cd /opt/k8s/cfg
export TOKEN_CSV=`head -c 16 /dev/urandom | od -An -t x | tr -d ' '`
cat > /opt/k8s/cfg/token.csv << EOF
$TOKEN_CSV,kubelet-bootstrap,10001,"system:node-bootstrapper"
EOF

5.1.5 Manage kube-apiserver with systemd

cat > /usr/lib/systemd/system/kube-apiserver.service << 'EOF'
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=/opt/k8s/cfg/kube-apiserver.conf
ExecStart=/usr/local/bin/kube-apiserver $KUBE_APISERVER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF

5.1.6 Start kube-apiserver and enable it at boot

systemctl daemon-reload
systemctl start kube-apiserver && systemctl enable kube-apiserver

Check that kube-apiserver started correctly; if it did not, inspect the logs with journalctl -u kube-apiserver

$ systemctl status kube-apiserver |grep Active
   Active: active (running) since Tue 2020-07-07 10:01:15 CST; 37s ago
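
Optionally, probe the secure port directly; unauthenticated requests to /healthz are permitted by the default system:public-info-viewer role binding:

#Expect the response: ok
curl -k https://10.0.0.30:6443/healthz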

5.1.7 Authorize the kubelet-bootstrap user to request certificates

kubectl create clusterrolebinding kubelet-bootstrap \
--clusterrole=system:node-bootstrapper \
--user=kubelet-bootstrap
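
Confirm the binding was created (optional check):

kubectl get clusterrolebinding kubelet-bootstrap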

5.2 Deploy kube-controller-manager

5.2.1 Create the config file

cat > /opt/k8s/cfg/kube-controller-manager.conf << EOF
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/k8s/logs \\
--leader-elect=true \\
--master=127.0.0.1:8080 \\
--bind-address=127.0.0.1 \\
--allocate-node-cidrs=true \\
--cluster-cidr=10.244.0.0/16 \\
--service-cluster-ip-range=172.16.0.0/24 \\
--cluster-signing-cert-file=/opt/k8s/cert/ca.pem \\
--cluster-signing-key-file=/opt/k8s/cert/ca-key.pem  \\
--root-ca-file=/opt/k8s/cert/ca.pem \\
--service-account-private-key-file=/opt/k8s/cert/ca-key.pem \\
--experimental-cluster-signing-duration=876000h0m0s"
EOF
  • --master: connect to the apiserver through the local insecure port 8080.
  • --leader-elect: leader election when multiple instances of this component run (HA)
  • --cluster-signing-cert-file / --cluster-signing-key-file: the CA that automatically signs kubelet certificates; must match the apiserver's CA

5.2.2 Manage kube-controller-manager with systemd

cat > /usr/lib/systemd/system/kube-controller-manager.service << 'EOF'
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=/opt/k8s/cfg/kube-controller-manager.conf
ExecStart=/usr/local/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF

5.2.3 Start kube-controller-manager and enable it at boot

systemctl daemon-reload
systemctl start kube-controller-manager && systemctl enable kube-controller-manager

Check that kube-controller-manager started correctly; if it did not, inspect the logs with journalctl -u kube-controller-manager

$ systemctl status kube-controller-manager |grep Active
   Active: active (running) since Tue 2020-07-07 10:01:15 CST; 37s ago

5.3 Deploy kube-scheduler

5.3.1 Create the kube-scheduler config file

cat > /opt/k8s/cfg/kube-scheduler.conf << EOF
KUBE_SCHEDULER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/k8s/logs \\
--leader-elect \\
--master=127.0.0.1:8080 \\
--bind-address=127.0.0.1"
EOF
  • --master: connect to the apiserver through the local insecure port 8080.
  • --leader-elect: leader election when multiple instances of this component run (HA)

5.3.2 Manage kube-scheduler with systemd

cat > /usr/lib/systemd/system/kube-scheduler.service << 'EOF'
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=/opt/k8s/cfg/kube-scheduler.conf
ExecStart=/usr/local/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF

5.3.3 Start kube-scheduler and enable it at boot

systemctl daemon-reload
systemctl start kube-scheduler && systemctl enable kube-scheduler

Check that kube-scheduler started correctly; if it did not, inspect the logs with journalctl -u kube-scheduler

$ systemctl status kube-scheduler |grep Active
   Active: active (running) since Wed 2020-07-08 09:45:23 CST; 44s ago

5.4 kubectl command completion and namespace switching

5.4.1 Set up kubectl command completion

dnf -y install bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc

5.4.2 Set up a namespace-switching tool

#Clone the tool
git clone https://github.com.cnpmjs.org/ahmetb/kubectx
cp kubectx/kubens /usr/local/bin

#List all namespaces
$ kubens

#Switch to the kube-system namespace
$ kubens kube-system
Context "kubernetes-admin@kubernetes" modified.
Active namespace is "kube-system".

5.5 Check the status of the master components

$ kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok                  
controller-manager   Healthy   ok                  
etcd-2               Healthy   {"health":"true"}   
etcd-0               Healthy   {"health":"true"}   
etcd-1               Healthy   {"health":"true"}

6. Deploy the Worker Nodes

The Kubernetes worker nodes run the following components:

  • kubelet

  • kube-proxy

  • flannel

⚠️ If pods will later need to run on the master node, the master also needs kubelet and kube-proxy

⚠️ Here the master doubles as a node, i.e. the node components are also deployed on the master

6.1 Create working directories on the worker nodes

Create the working directories on all worker nodes

export NODE_IPS=(10.0.0.33 10.0.0.34)
for node_ip in ${NODE_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh root@${node_ip} mkdir -p /opt/k8s/{logs,cfg}
  done

6.2 Deploy kubelet

6.2.1 Create the kubelet config file

The pause-amd64:3.0 image was originally hosted at gcr.io/google-containers/pause-amd64:3.0

⚠️ --hostname-override in the kubelet config must match each node's hostname

Create a template first; sed will later substitute each node's hostname into --hostname-override

mkdir /opt/k8s/kubelet && cd /opt/k8s/kubelet
cat > kubelet.template << EOF
KUBELET_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/k8s/logs \\
--hostname-override=##NODE_NAME## \\
--network-plugin=cni \\
--kubeconfig=/opt/k8s/cfg/kubelet.kubeconfig \\
--bootstrap-kubeconfig=/opt/k8s/cfg/bootstrap.kubeconfig \\
--config=/opt/k8s/cfg/kubelet-config.yml \\
--cert-dir=/opt/k8s/cert \\
--pod-infra-container-image=pptfz/pause-amd64:3.0"
EOF
  • --hostname-override: display name, unique within the cluster
  • --network-plugin: enable CNI
  • --kubeconfig: empty path; it is generated automatically and later used to connect to the apiserver
  • --bootstrap-kubeconfig: used on first start to request a certificate from the apiserver
  • --config: parameter configuration file
  • --cert-dir: directory where kubelet certificates are generated
  • --pod-infra-container-image: image of the container that manages the pod network

Run the sed substitution

source /opt/k8s/script/env.sh
cd /opt/k8s/kubelet
for (( i=0; i < 3; i++ ))
  do
    sed -e "s/##NODE_NAME##/${NODE_NAMES[i]}/" kubelet.template > kubelet-${NODE_IPS[i]}.conf
  done

#After substitution the following files exist; --hostname-override in each file is that node's own hostname
ls *.conf
kubelet-10.0.0.30.conf  kubelet-10.0.0.33.conf  kubelet-10.0.0.34.conf

Copy the files to all nodes

source /opt/k8s/script/env.sh
cd /opt/k8s/kubelet
for node_ip in ${NODE_IPS[@]}
  do
   echo ">>> ${node_ip}"
   scp kubelet-${node_ip}.conf root@${node_ip}:/opt/k8s/cfg/kubelet.conf
  done

6.2.2 Create the kubelet parameter config file

⚠️ Because Docker was set up with the systemd cgroup driver, cgroupDriver: systemd must be set here to match

source /opt/k8s/script/env.sh
for node_ip in ${NODE_IPS[@]}
  do
   echo ">>> ${node_ip}"
   ssh root@${node_ip} 'cat > /opt/k8s/cfg/kubelet-config.yml << EOF
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
cgroupDriver: systemd
clusterDNS:
- 172.16.0.2
clusterDomain: cluster.local 
failSwapOn: false
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /opt/k8s/cert/ca.pem 
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
maxOpenFiles: 1000000
maxPods: 110
EOF'
  done

6.2.3 Generate the bootstrap.kubeconfig file

This kubeconfig is what lets the kubelet connect to kube-apiserver and request a certificate

cd /opt/k8s/cfg
export KUBE_APISERVER="https://10.0.0.30:6443" # apiserver IP:PORT
export TOKEN=`awk -F, '{print $1}' /opt/k8s/cfg/token.csv` # must match token.csv

# Generate the kubelet bootstrap kubeconfig
kubectl config set-cluster kubernetes \
  --certificate-authority=/opt/k8s/cert/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=bootstrap.kubeconfig
kubectl config set-credentials "kubelet-bootstrap" \
  --token=${TOKEN} \
  --kubeconfig=bootstrap.kubeconfig
kubectl config set-context default \
  --cluster=kubernetes \
  --user="kubelet-bootstrap" \
  --kubeconfig=bootstrap.kubeconfig
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
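
A quick optional check that the generated file embeds the CA and points at the right apiserver:

kubectl config view --kubeconfig=bootstrap.kubeconfig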

Copy bootstrap.kubeconfig to the node(s)

export NODE_IPS=(10.0.0.33 10.0.0.34)
cd /opt/k8s/cfg
for node_ip in ${NODE_IPS[@]}
  do
   echo ">>> ${node_ip}"
   scp bootstrap.kubeconfig root@${node_ip}:/opt/k8s/cfg
  done

6.2.4 Copy the kubelet and kube-proxy binaries to the node(s)

export NODE_IPS=(10.0.0.33 10.0.0.34)
for node_ip in ${NODE_IPS[@]}
  do
   echo ">>> ${node_ip}"
   scp -p /usr/local/bin/{kubelet,kube-proxy} ${node_ip}:/usr/local/bin
  done

6.2.5 Manage kubelet with systemd

cat > /usr/lib/systemd/system/kubelet.service << 'EOF'
[Unit]
Description=Kubernetes Kubelet
After=docker.service
[Service]
EnvironmentFile=/opt/k8s/cfg/kubelet.conf
ExecStart=/usr/local/bin/kubelet $KUBELET_OPTS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF

Copy the unit file to the node(s)

export NODE_IPS=(10.0.0.33 10.0.0.34)
for node_ip in ${NODE_IPS[@]}
  do
   echo ">>> ${node_ip}"
   scp -p /usr/lib/systemd/system/kubelet.service root@${node_ip}:/usr/lib/systemd/system
  done

6.2.6 Start kubelet and enable it at boot

source /opt/k8s/script/env.sh
for node_ip in ${NODE_IPS[@]}
  do
   echo ">>> ${node_ip}"
   ssh root@${node_ip} 'systemctl daemon-reload && systemctl start kubelet && systemctl enable kubelet'
  done

Verify kubelet status

source /opt/k8s/script/env.sh
for node_ip in ${NODE_IPS[@]}
  do
   echo ">>> ${node_ip}"
   ssh root@${node_ip} 'systemctl status kubelet|grep active'
  done

Expected output

>>> 10.0.0.30
   Active: active (running) since Tue 2020-07-07 13:27:37 CST; 19s ago
>>> 10.0.0.33
   Active: active (running) since Tue 2020-07-07 13:27:52 CST; 4s ago
>>> 10.0.0.34
   Active: active (running) since Tue 2020-07-07 13:27:52 CST; 4s ago

6.2.7 Approve the kubelet certificate requests and join the nodes to the cluster

⚠️ As soon as a kubelet starts successfully, its node sends a certificate request; view the requests with kubectl get csr

View the kubelet certificate requests; 3 nodes are requesting certificates

$ kubectl get csr
NAME                                                   AGE     SIGNERNAME                                    REQUESTOR           CONDITION
node-csr-4YfUpgFR7xndRzDFy4HHOfvRhO_p7iucRgB8dGptrIM   2m53s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Pending
node-csr-Fh0hY-oE3W6d7_TlO06d-eV9CD93iF3r3dSQSk9dtSo   2m37s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Pending
node-csr-kG8QBmV1SUcR39FGV2JtSDpJzEHxlRpJnEJhLv2W7fI   2m38s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Pending

Approve a request with kubectl certificate approve <csr-name>

#Batch-approve them all with the following
for i in `kubectl get csr|awk 'NR>1{print $1}'`
  do
    kubectl certificate approve $i
  done

certificatesigningrequest.certificates.k8s.io/node-csr-4YfUpgFR7xndRzDFy4HHOfvRhO_p7iucRgB8dGptrIM approved
certificatesigningrequest.certificates.k8s.io/node-csr-Fh0hY-oE3W6d7_TlO06d-eV9CD93iF3r3dSQSk9dtSo approved
certificatesigningrequest.certificates.k8s.io/node-csr-kG8QBmV1SUcR39FGV2JtSDpJzEHxlRpJnEJhLv2W7fI approved

Check the kubelet certificate requests again

$ kubectl get csr
NAME                                                   AGE     SIGNERNAME                                    REQUESTOR           CONDITION
node-csr-4YfUpgFR7xndRzDFy4HHOfvRhO_p7iucRgB8dGptrIM   6m35s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Approved,Issued
node-csr-Fh0hY-oE3W6d7_TlO06d-eV9CD93iF3r3dSQSk9dtSo   6m19s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Approved,Issued
node-csr-kG8QBmV1SUcR39FGV2JtSDpJzEHxlRpJnEJhLv2W7fI   6m20s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Approved,Issued
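
The nodes are now registered with the cluster. They will show up in kubectl get nodes, but remain NotReady until the CNI network is deployed in section 6.4:

kubectl get nodes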

6.3 Deploy kube-proxy

6.3.1 Create the kube-proxy config file

export NODE_IPS=(10.0.0.30 10.0.0.33 10.0.0.34)
for node_ip in ${NODE_IPS[@]}
  do
   echo ">>> ${node_ip}"
   ssh root@${node_ip} 'cat > /opt/k8s/cfg/kube-proxy.conf << EOF
KUBE_PROXY_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/k8s/logs \\
--config=/opt/k8s/cfg/kube-proxy-config.yml"
EOF'
  done

6.3.2 Create the kube-proxy parameter file

Generate a template first; sed will later substitute each node's hostname into hostnameOverride

mkdir /opt/k8s/kube-proxy && cd /opt/k8s/kube-proxy
cat > kube-proxy-config.yml.template << EOF
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
metricsBindAddress: 0.0.0.0:10249
clientConnection:
  kubeconfig: /opt/k8s/cfg/kube-proxy.kubeconfig
hostnameOverride: ##NODE_NAME##
clusterCIDR: 10.244.0.0/16   # pod network CIDR, matching --cluster-cidr in kube-controller-manager
EOF

Run the sed substitution

source /opt/k8s/script/env.sh
cd /opt/k8s/kube-proxy
for (( i=0; i < 3; i++ ))
  do
    sed -e "s/##NODE_NAME##/${NODE_NAMES[i]}/" kube-proxy-config.yml.template > kube-proxy-config-${NODE_IPS[i]}.yml
  done

#After substitution the following files exist; hostnameOverride in each file is that node's own hostname
ls kube*.yml
kube-proxy-config-10.0.0.30.yml  kube-proxy-config-10.0.0.33.yml  kube-proxy-config-10.0.0.34.yml

Copy the files to all nodes

source /opt/k8s/script/env.sh
cd /opt/k8s/kube-proxy
for node_ip in ${NODE_IPS[@]}
  do
   echo ">>> ${node_ip}"
   scp kube-proxy-config-${node_ip}.yml root@${node_ip}:/opt/k8s/cfg/kube-proxy-config.yml
  done

6.3.3 Generate the kube-proxy certificate

Create the certificate signing request

cd /opt/k8s/cert
cat > kube-proxy-csr.json << EOF
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

Generate the certificate

cfssl gencert -ca=/opt/k8s/cert/ca.pem -ca-key=/opt/k8s/cert/ca-key.pem -config=/opt/k8s/cert/ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy

#On success the command produces the following files
ls kube-proxy*pem
kube-proxy-key.pem  kube-proxy.pem  kube-proxy.csr

6.3.4 Generate the kube-proxy.kubeconfig file

export KUBE_APISERVER="https://10.0.0.30:6443"
cd /opt/k8s/cfg
kubectl config set-cluster kubernetes \
  --certificate-authority=/opt/k8s/cert/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-proxy.kubeconfig
kubectl config set-credentials kube-proxy \
  --client-certificate=/opt/k8s/cert/kube-proxy.pem \
  --client-key=/opt/k8s/cert/kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-proxy \
  --kubeconfig=kube-proxy.kubeconfig
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

Copy kube-proxy.kubeconfig to the node(s)

export NODE_IPS=(10.0.0.33 10.0.0.34)
cd /opt/k8s/cfg
for node_ip in ${NODE_IPS[@]}
  do
   echo ">>> ${node_ip}"
   scp -p kube-proxy.kubeconfig root@${node_ip}:/opt/k8s/cfg
  done

6.3.5 Manage kube-proxy with systemd

cat > /usr/lib/systemd/system/kube-proxy.service << 'EOF'
[Unit]
Description=Kubernetes Proxy
After=network.target
[Service]
EnvironmentFile=/opt/k8s/cfg/kube-proxy.conf
ExecStart=/usr/local/bin/kube-proxy $KUBE_PROXY_OPTS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF

Copy the unit file to the node(s)

export NODE_IPS=(10.0.0.33 10.0.0.34)
for node_ip in ${NODE_IPS[@]}
  do
   echo ">>> ${node_ip}"
   scp -p /usr/lib/systemd/system/kube-proxy.service root@${node_ip}:/usr/lib/systemd/system
  done

6.3.6 Start kube-proxy and enable it at boot

source /opt/k8s/script/env.sh
for node_ip in ${NODE_IPS[@]}
  do
   echo ">>> ${node_ip}"
   ssh root@${node_ip} 'systemctl daemon-reload && systemctl start kube-proxy && systemctl enable kube-proxy'
  done

Verify kube-proxy status

source /opt/k8s/script/env.sh
for node_ip in ${NODE_IPS[@]}
  do
   echo ">>> ${node_ip}"
   ssh root@${node_ip} 'systemctl status kube-proxy|grep active'
  done

Expected output

>>> 10.0.0.30
   Active: active (running) since Tue 2020-07-07 14:06:01 CST; 10s ago
>>> 10.0.0.33
   Active: active (running) since Tue 2020-07-07 14:03:18 CST; 2min 53s ago
>>> 10.0.0.34
   Active: active (running) since Tue 2020-07-07 14:03:18 CST; 2min 53s ago
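
Optionally, confirm the proxy mode on any node; kube-proxy answers on the metrics port 10249 configured in kube-proxy-config.yml:

#Expect the response: iptables (the default mode, since no mode is set in the config)
curl http://127.0.0.1:10249/proxyMode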

6.4 Deploy the CNI network

Create a directory

mkdir /opt/k8s/cni && cd /opt/k8s/cni

Download the plugin package

wget https://github.com/containernetworking/plugins/releases/download/v0.8.6/cni-plugins-linux-amd64-v0.8.6.tgz

Install the binaries

The extracted archive contains nothing but executable binaries

mkdir -p /opt/cni/bin
tar xf cni-plugins-linux-amd64-v0.8.6.tgz -C /opt/cni/bin

Copy the binaries to the node(s)

source /opt/k8s/script/env.sh
for node_ip in ${NODE_IPS[@]}
  do
   echo ">>> ${node_ip}"
   scp -rp /opt/cni root@${node_ip}:/opt 
  done

Download the YAML manifest

#Download the manifest and swap in a mirrored image repository
cd /opt/k8s/cni
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
sed -i -r "s#quay.io/coreos/flannel:.*-amd64#pptfz/flannel:v0.12.0-amd64#g" kube-flannel.yml

Deploy the CNI network

kubectl apply -f kube-flannel.yml

6.5 Authorize the apiserver to access the kubelet

cd /opt/k8s/cfg
cat > apiserver-to-kubelet-rbac.yaml << EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kube-apiserver-to-kubelet
rules:
  - apiGroups:
      - ""
    resources:
      - nodes/proxy
      - nodes/stats
      - nodes/log
      - nodes/spec
      - nodes/metrics
      - pods/log
    verbs:
      - "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:kube-apiserver
  namespace: ""
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-apiserver-to-kubelet
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: kubernetes
EOF

kubectl apply -f apiserver-to-kubelet-rbac.yaml
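
To verify the new binding (an optional check; the apiserver authenticates to kubelets as the user kubernetes, the CN of its certificate):

#Expect the response: yes
kubectl auth can-i get nodes --subresource=proxy --as kubernetes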

6.6 Verify

All pods of the flannel network plugin must be in the Running state

$ kubectl -n kube-system get pods
NAME                          READY   STATUS    RESTARTS   AGE
kube-flannel-ds-amd64-ncrzv   1/1     Running   0          11m
kube-flannel-ds-amd64-spn7q   1/1     Running   0          11m
kube-flannel-ds-amd64-vk5qr   1/1     Running   0          11m

Once the network is deployed, every node's status changes to Ready

$ kubectl get nodes 
NAME          STATUS   ROLES    AGE   VERSION
k8s-master1   Ready    <none>   18m   v1.18.5
k8s-node1     Ready    <none>   18m   v1.18.5
k8s-node2     Ready    <none>   18m   v1.18.5

Delete the kubelet certificate files on the node(s)

⚠️ These files are generated automatically when the certificate request is approved and differ on every node; they must be deleted so they can be regenerated

export NODE_IPS=(10.0.0.33 10.0.0.34)
for node_ip in ${NODE_IPS[@]}
  do
   echo ">>> ${node_ip}"
   ssh root@${node_ip} rm -rf /opt/k8s/cert/kubelet*
  done

7. Deploy CoreDNS

Create a directory

mkdir /opt/k8s/coredns && cd /opt/k8s/coredns

Clone the CoreDNS deployment project

git clone https://github.com.cnpmjs.org/coredns/deployment.git
cd deployment/kubernetes

By default CLUSTER_DNS_IP is obtained automatically from the kube-dns Service's cluster IP, but since kube-dns is not deployed here, a cluster IP must be specified manually.

Edit deploy.sh: comment out the line CLUSTER_DNS_IP=$(kubectl get service --namespace kube-system kube-dns -o jsonpath="{.spec.clusterIP}") and set CLUSTER_DNS_IP=172.16.0.2, because the kubelet parameter file /opt/k8s/cfg/kubelet-config.yml specifies clusterDNS: 172.16.0.2

#Original content, around line 103
if [[ -z $CLUSTER_DNS_IP ]]; then
  # Default IP to kube-dns IP
  CLUSTER_DNS_IP=$(kubectl get service --namespace kube-system kube-dns -o jsonpath="{.spec.clusterIP}")

#Change it to the following
if [[ -z $CLUSTER_DNS_IP ]]; then
  # Default IP to kube-dns IP
  # CLUSTER_DNS_IP=$(kubectl get service --namespace kube-system kube-dns -o jsonpath="{.spec.clusterIP}")
  CLUSTER_DNS_IP=172.16.0.2

Deploy CoreDNS

#Preview the rendered manifest; nothing is deployed yet
./deploy.sh

#Actually deploy it
./deploy.sh | kubectl apply -f -

# Check CoreDNS
$ kubectl get svc,pods -n kube-system| grep coredns
pod/coredns-85b4878f78-2ndvz      0/1     Running   0          9s

Test CoreDNS resolution

mkdir /opt/k8s/yaml && cd /opt/k8s/yaml
cat > busybox.yaml << EOF
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default
spec:
  containers:
  - name: busybox
    image: busybox:1.28.4
    command:
      - sleep
      - "3600"
    imagePullPolicy: IfNotPresent
  restartPolicy: Always
EOF

kubectl apply -f busybox.yaml
#Output like the following means resolution works
$ kubectl exec -i busybox -n default nslookup kubernetes
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl kubectl exec [POD] -- [COMMAND] instead.
Server:    172.16.0.2
Address 1: 172.16.0.2 kube-dns.kube-system.svc.cluster.local

Name:      kubernetes
Address 1: 172.16.0.1 kubernetes.default.svc.cluster.local

8. Deploy the Dashboard

8.1 Deploy the official dashboard

Download the YAML manifest

cd /opt/k8s/yaml
wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.3/aio/deploy/recommended.yaml

By default the Dashboard is only reachable from inside the cluster; change its Service to the NodePort type to expose it externally

#Original content
spec:
  ports:
    - port: 443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard

#Change it to the following
spec:
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30001
  type: NodePort
  selector:
    k8s-app: kubernetes-dashboard

Create the dashboard

#Apply the manifest
kubectl apply -f recommended.yaml


#Check the pod status
$ kubectl get pods -A |grep kubernetes-dashboard 
kubernetes-dashboard   dashboard-metrics-scraper-6b4884c9d5-lrz94   1/1     Running       0          47s
kubernetes-dashboard   kubernetes-dashboard-7f99b75bf4-ndztw        1/1     Running       0          47s

Create a service account and bind it to the built-in cluster-admin cluster role

kubectl create serviceaccount dashboard-admin -n kube-system
kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin

Retrieve the token

kubectl describe secrets -n kube-system $(kubectl -n kube-system get secret | awk '/dashboard-admin/{print $1}')

Browse to https://<any-cluster-node-IP>:30001

⚠️ Use Firefox; other browsers may reject the dashboard's self-signed certificate

(login page screenshot omitted)

Home screen after login

(screenshot omitted)

8.2 Deploy Kuboard

Kuboard website

Install

kubectl apply -f https://kuboard.cn/install-script/kuboard.yaml
kubectl apply -f https://addons.kuboard.cn/metrics-server/0.3.6/metrics-server.yaml

Check Kuboard status

$ kubectl get pods -l k8s.kuboard.cn/name=kuboard -n kube-system
NAME                       READY   STATUS    RESTARTS   AGE
kuboard-7bb89b4cc4-7rqh4   1/1     Running   0          78s

Get the admin token; it has ClusterAdmin privileges and can perform any operation

echo $(kubectl -n kube-system get secret $(kubectl -n kube-system get secret | grep kuboard-user | awk '{print $1}') -o go-template='{{.data.token}}' | base64 -d)

Access Kuboard

The Kuboard Service is exposed as a NodePort on port 32567; browse to http://<any-worker-node-IP>:32567/

(login page screenshot omitted)

Home screen after login

(screenshot omitted)
