Kubernetes v1.11.1 Fully Automated Ansible Deployment Guide

Goals

  • Deploy a 3-node HA master (v1.11.1, the latest release at the time of writing) plus 3 worker nodes
  • Run the entire control plane (etcd, kube-apiserver, kube-controller-manager, kube-scheduler, haproxy, keepalived) as static pods
  • Use IPVS instead of iptables to implement the Service network
  • Deploy add-ons such as flannel and CoreDNS as containers

Images

REPOSITORY                                 TAG             IMAGE ID       CREATED         SIZE
zhangguanzhang/keepalived                  1.3.9           7d0a6d093f78   9 days ago      14MB
freemanliu/kube-proxy-amd64                v1.11.1         d5c25579d0ff   2 weeks ago     97.8MB
freemanliu/kube-scheduler-amd64            v1.11.1         272b3a60cd68   2 weeks ago     56.8MB
freemanliu/kube-controller-manager-amd64   v1.11.1         52096ee87d0e   2 weeks ago     155MB
freemanliu/kube-apiserver-amd64            v1.11.1         816332bd9d11   2 weeks ago     187MB
freemanliu/etcd-amd64                      3.2.18          b8df3b177be2   3 months ago    219MB
freemanliu/flannel                         v0.10.0-amd64   f0fad859c909   6 months ago    44.6MB
freemanliu/pause-amd64                     3.1             da86e6ba6ca1   7 months ago    742kB
kairen/haproxy                             1.7             733c4b4d75c4   14 months ago   14.7MB

Software Versions

Software     Version
OS           CentOS 7.x
etcd         v3.2.18
keepalived   1.3.9
flannel      v0.10.0-amd64
haproxy      1.7
kubernetes   v1.11.1 (master => kube-apiserver, kube-controller-manager, kube-scheduler)
ipvsadm      v1.27

Host Layout

  • Set each host's hostname according to the table below
vim /etc/hostname
hostname   ip              soft
k8s-m1     192.168.0.101   haproxy,etcd,kubernetes,flannel
k8s-m2     192.168.0.102   haproxy,etcd,kubernetes,flannel
k8s-m3     192.168.0.103   haproxy,etcd,kubernetes,flannel
k8s-n1     192.168.0.104   kube-proxy,flannel
k8s-n2     192.168.0.105   kube-proxy,flannel
k8s-n3     192.168.0.106   kube-proxy,flannel
  • Add the /etc/hosts entries
echo '
192.168.0.101 k8s-m1
192.168.0.102 k8s-m2
192.168.0.103 k8s-m3
192.168.0.104 k8s-n1
192.168.0.105 k8s-n2
192.168.0.106 k8s-n3
' >> /etc/hosts

Environment Preparation (run the following on every host as root)

  • You can complete these steps on one host, turn it into a machine image, and clone that image to the remaining hosts

Linux environment setup

  • Configure the Aliyun yum repository
# back up the original repo file
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup
# add the Aliyun repo
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
# rebuild the yum cache
yum makecache
  • Install ipvsadm, wget, and socat
yum -y install ipvsadm wget  socat
  • Load the IPVS kernel modules
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo ip_vs_nq ip_vs_sed nf_conntrack_ipv4"
for kernel_module in \${ipvs_modules}; do
/sbin/modinfo -F filename \${kernel_module} > /dev/null 2>&1
if [ \$? -eq 0 ]; then
/sbin/modprobe \${kernel_module}
fi
done
EOF
chmod +x /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs
  • When the modules load correctly, lsmod shows output like this:
nf_nat                 26787  3 ip_vs_ftp,nf_nat_ipv4,nf_nat_masquerade_ipv4
ip_vs_sed              12519  0
ip_vs_nq               12516  0
ip_vs_sh               12688  0
ip_vs_dh               12688  0
ip_vs_lblcr            12922  0
ip_vs_lblc             12819  0
ip_vs_wrr              12697  0
ip_vs_rr               12600  9
ip_vs_wlc              12519  0
ip_vs_lc               12516  0
ip_vs                 141473  31 ip_vs_dh,ip_vs_lc,ip_vs_nq,ip_vs_rr,ip_vs_sh,ip_vs_ftp,ip_vs_sed,ip_vs_wlc,ip_vs_wrr,ip_vs_lblcr,ip_vs_lblc
nf_conntrack          133053  7 ip_vs,nf_nat,nf_nat_ipv4,xt_conntrack,nf_nat_masquerade_ipv4,nf_conntrack_netlink,nf_conntrack_ipv4
libcrc32c              12644  4 xfs,ip_vs,nf_nat,nf_conntrack
  • Disable the firewall, set kernel parameters, and disable swap
echo 'net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
vm.swappiness = 0
net.ipv4.neigh.default.gc_stale_time = 120
net.ipv4.conf.all.rp_filter = 0
net.ipv4.conf.default.rp_filter = 0
net.ipv4.conf.default.arp_announce = 2
net.ipv4.conf.lo.arp_announce = 2
net.ipv4.conf.all.arp_announce = 2
net.ipv4.ip_forward = 1
net.ipv4.tcp_max_tw_buckets = 5000
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 1024
net.ipv4.tcp_synack_retries = 2
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-arptables = 1
fs.may_detach_mounts = 1' > /etc/sysctl.conf
swapoff -a
sysctl -w vm.swappiness=0
sed -ri '/^[^#]*swap/s@^@#@' /etc/fstab
systemctl disable --now firewalld
# the settings written to /etc/sysctl.conf are loaded after the reboot at the end of this section
  • Disable SELinux
# change SELINUX=enforcing to SELINUX=disabled
vim /etc/selinux/config
# after the next reboot, getenforce should print Disabled
getenforce
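  • The same edit can also be made non-interactively; a minimal sketch, assuming the line currently reads SELINUX=enforcing:
# sketch: disable SELinux without opening an editor
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config
# optionally stop enforcing right away instead of waiting for the reboot
setenforce 0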
  • Make sure the following command prints 1
cat /proc/sys/net/ipv4/ip_forward
1

Docker installation

# Step 1: install the required system tools
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
# Step 2: add the Docker CE repository
sudo yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# Step 3: refresh the cache and install Docker CE
sudo yum makecache fast
sudo yum -y install docker-ce
# Step 4: start the Docker service
systemctl start docker
# enable it at boot
systemctl enable docker
# To install a specific version of Docker CE:
# Step 1: list the available Docker CE versions:
# yum list docker-ce.x86_64 --showduplicates | sort -r
#   Loading mirror speeds from cached hostfile
#   Loaded plugins: branch, fastestmirror, langpacks
#   docker-ce.x86_64  17.03.1.ce-1.el7.centos  docker-ce-stable
#   docker-ce.x86_64  17.03.1.ce-1.el7.centos  @docker-ce-stable
#   docker-ce.x86_64  17.03.0.ce-1.el7.centos  docker-ce-stable
#   Available Packages
# Step 2: install the chosen version (VERSION, e.g. 17.03.0.ce-1.el7.centos from the listing above):
# sudo yum -y install docker-ce-[VERSION]
  • Configure a Docker registry mirror
  • The Aliyun mirror service is used here; register for your own account and substitute your own mirror address
  • See the Aliyun registry mirror documentation
mkdir -p /etc/docker/
echo '
{
"registry-mirrors": ["https://xxxx.mirror.aliyuncs.com"]
}
' > /etc/docker/daemon.json
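  • If you want the mirror configuration to take effect before the reboot mentioned below, restart the Docker daemon (standard systemd commands):
systemctl daemon-reload
systemctl restart docker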
  • After completing the steps above, reboot the host

The following steps are performed on any one master node (k8s-m1 is used here)

  • Install the required software
yum -y install ansible git
  • Download Kubernetes (only kubelet and kubectl are used)
# work from the home directory
cd ~/
# option 1: direct download (requires a proxy/VPN in mainland China)
wget https://storage.googleapis.com/kubernetes-release/release/v1.11.1/kubernetes-node-linux-amd64.tar.gz
# option 2: no proxy needed, Qiniu CDN mirror in China
wget http://ols7lqkih.bkt.clouddn.com/kubernetes-node-linux-amd64-v1.11.1.tar.gz
# option 3: no proxy needed, Baidu Pan
# link: https://pan.baidu.com/s/1kbz1X_MH3XIjIXiG80tBmA  password: rk4j
  • Clone the Ansible playbook repository
cd ~/
git clone https://github.com/goudai/ansible-kubernetes-v1.11.1-ipvs.git
# extract kubernetes
tar xf kubernetes-node-linux-amd64.tar.gz
# copy the required binaries into the Ansible distribution directory; these two files are pushed to every machine in the cluster
cp kubernetes/node/bin/{kubectl,kubelet} ansible-kubernetes-v1.11.1-ipvs/roles/scp/files
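  • Optionally confirm the extracted binaries are the expected version before distributing them:
kubernetes/node/bin/kubectl version --client
kubernetes/node/bin/kubelet --version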
  • Edit the inventory (hosts) file
# switch to the project directory
cd ~/ansible-kubernetes-v1.11.1-ipvs
# edit the hosts (inventory) file
vim hosts
# replace the IPs below with your own
[otherMaster] # replace with the IPs of k8s-m2 and k8s-m3
192.168.0.102
192.168.0.103
# if the machines use different passwords, use the format below
# and comment out the ansible_ssh_pass: 111111 line in group_vars/all.yml
# 192.168.0.102 ansible_ssh_pass=password1 ansible_ssh_port=22
# 192.168.0.103 ansible_ssh_pass=password2 ansible_ssh_port=22
[Node] # replace with the IP of k8s-n1
192.168.0.104
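  • Before running any playbook, it is worth confirming that Ansible can reach every host; a quick sanity check (the -i hosts flag assumes the inventory path is not already configured in ansible.cfg, and SSH password auth requires the sshpass package on the control host):
cd ~/ansible-kubernetes-v1.11.1-ipvs
ansible all -i hosts -m ping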
  • Edit the global variables in group_vars/all.yml
cd ~/ansible-kubernetes-v1.11.1-ipvs
vim group_vars/all.yml
# The TOKEN values can be left as they are; to regenerate them:
# TOKEN: head -c 32 /dev/urandom | base64
# TOKEN_ID: openssl rand -hex 3
# TOKEN_SECRET: openssl rand -hex 8

# VIP and NETMASK
# VIP is the virtual IP used by keepalived; set it to any unused IP in your subnet
# NETMASK is the netmask of the VIP
# INTERFACE_NAME is the network interface the VIP is bound to
  • The remaining parameters can be left at their defaults unless you have special requirements; a token-generation sketch follows
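  • If you do regenerate the token, a minimal sketch of producing consistent values (the TOKEN_ID.TOKEN_SECRET layout is the standard bootstrap-token format; the exact variable names used in all.yml are defined by the repository):
# sketch: regenerate bootstrap token material
TOKEN_ID=$(openssl rand -hex 3)       # 6 hex characters
TOKEN_SECRET=$(openssl rand -hex 8)   # 16 hex characters
TOKEN=$(head -c 32 /dev/urandom | base64)
echo "bootstrap token: ${TOKEN_ID}.${TOKEN_SECRET}"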

Start the Automated Deployment

  • Distribute the /etc/hosts file
# make sure every machine (especially the masters) has the hosts entries added above
ansible all -m copy -a "src=/etc/hosts dest=/etc/hosts"
  • Distribute kubectl, kubelet, cfssl, and the CNI plugins
ansible-playbook distribute-file.yml
  • Generate the certificates needed by etcd and Kubernetes and distribute them to all nodes
cd ~/ansible-kubernetes-v1.11.1-ipvs
ansible-playbook gen-etcd-ca-kubenetes-ca.yml
  • Deploy the masters (kubelet runs as a systemd service) and start them
cd ~/ansible-kubernetes-v1.11.1-ipvs
ansible-playbook deploy-master.yml
  • After the playbook finishes, wait for the Kubernetes cluster to come up
  • Kubelet logs can be followed with tail -f /var/log/messages
  • When everything has started correctly, the output looks like this:
[root@k8s-m1 kubernetes]# kubectl get cs
NAME                 STATUS    MESSAGE              ERROR
scheduler            Healthy   ok
controller-manager   Healthy   ok
etcd-2               Healthy   {"health": "true"}
etcd-0               Healthy   {"health": "true"}
etcd-1               Healthy   {"health": "true"}

[root@k8s-m1 kubernetes]# kubectl get svc
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   23h

[root@k8s-m1 kubernetes]# kubectl get node
NAME      STATUS     ROLES     AGE       VERSION
k8s-m1    NotReady   master    52s       v1.11.1
k8s-m2    NotReady   master    51s       v1.11.1
k8s-m3    NotReady   master    50s       v1.11.1

[root@k8s-m1 kubernetes]# kubectl -n kube-system get po
NAME                             READY     STATUS    RESTARTS   AGE
etcd-k8s-m1                      1/1       Running   0          7m
etcd-k8s-m2                      1/1       Running   0          8m
etcd-k8s-m3                      1/1       Running   0          7m
haproxy-k8s-m1                   1/1       Running   0          7m
haproxy-k8s-m2                   1/1       Running   0          8m
haproxy-k8s-m3                   1/1       Running   0          8m
keepalived-k8s-m1                1/1       Running   0          8m
keepalived-k8s-m2                1/1       Running   0          7m
keepalived-k8s-m3                1/1       Running   0          7m
kube-apiserver-k8s-m1            1/1       Running   0          7m
kube-apiserver-k8s-m2            1/1       Running   0          6m
kube-apiserver-k8s-m3            1/1       Running   0          7m
kube-controller-manager-k8s-m1   1/1       Running   0          8m
kube-controller-manager-k8s-m2   1/1       Running   0          8m
kube-controller-manager-k8s-m3   1/1       Running   0          8m
kube-scheduler-k8s-m1            1/1       Running   0          8m
kube-scheduler-k8s-m2            1/1       Running   0          8m
kube-scheduler-k8s-m3            1/1       Running   0          8m
  • Enable Bootstrap Token authentication
cd ~/ansible-kubernetes-v1.11.1-ipvs
ansible-playbook tls-bootstrap-autoapprove-RBAC.yml
# list the CSRs
[root@k8s-m1 ansible-kubernetes-v1.11.1-ipvs]# kubectl get csr
NAME        AGE       REQUESTOR            CONDITION
csr-5l8dr   1m        system:node:k8s-m3   Approved,Issued
csr-chcb4   1m        system:node:k8s-m2   Approved,Issued
csr-sb9f6   1m        system:node:k8s-m1   Approved,Issued
  • At this point the three-node HA Kubernetes master is fully deployed

Adding Worker Nodes

  • By default, the hosts listed in the [Node] section of the inventory are added
cd ~/ansible-kubernetes-v1.11.1-ipvs
# before running this playbook, make sure the [Node] section contains no nodes that have already been added
ansible-playbook add-node.yml
# after the nodes are added successfully, comment out all nodes in the inventory; to add more nodes later, just append the new hosts
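  • A quick way to confirm a node has joined (it will stay NotReady until kube-proxy and flannel below are deployed):
kubectl get node
kubectl get csr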

Installing Add-ons

  • All add-ons are deployed as containers

Deploying the kube-proxy add-on

  • If your clusterCIDR differs, replace '10.244.0.0/16'
  • Replace ${https://vip:6443} with the VIP from group_vars/all.yml
  • For example, if vip = 192.168.0.110, then ${https://vip:6443} becomes https://192.168.0.110:6443
  • After making the replacements, save the result as a new file named kube-proxy.yml
  • Run kubectl apply -f kube-proxy.yml on any master to deploy kube-proxy (a command sketch follows the manifest below)
# kube-proxy.yml
# image: registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy-amd64:v1.11.1
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-proxy
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: system:kube-proxy
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
subjects:
- kind: ServiceAccount
  name: kube-proxy
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: system:node-proxier
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: ConfigMap
metadata:
  labels:
    app: kube-proxy
  name: kube-proxy
  namespace: kube-system
data:
  config.conf: |-
    apiVersion: kubeproxy.config.k8s.io/v1alpha1
    bindAddress: 0.0.0.0
    clientConnection:
      acceptContentTypes: ""
      burst: 10
      contentType: application/vnd.kubernetes.protobuf
      kubeconfig: /var/lib/kube-proxy/kubeconfig.conf
      qps: 5
    clusterCIDR: '10.244.0.0/16'
    configSyncPeriod: 15m0s
    conntrack:
      max: null
      maxPerCore: 32768
      min: 131072
      tcpCloseWaitTimeout: 1h0m0s
      tcpEstablishedTimeout: 24h0m0s
    enableProfiling: false
    healthzBindAddress: 0.0.0.0:10256
    hostnameOverride: ""
    iptables:
      masqueradeAll: false
      masqueradeBit: 14
      minSyncPeriod: 0s
      syncPeriod: 30s
    ipvs:
      masqueradeAll: true
      minSyncPeriod: 5s
      scheduler: "rr"
      syncPeriod: 30s
    kind: KubeProxyConfiguration
    metricsBindAddress: 127.0.0.1:10249
    mode: "ipvs"
    nodePortAddresses: null
    oomScoreAdj: -999
    portRange: ""
    resourceContainer: /kube-proxy
    udpIdleTimeout: 250ms
  kubeconfig.conf: |-
    apiVersion: v1
    kind: Config
    clusters:
    - cluster:
        certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        server: ${https://vip:6443}
      name: default
    contexts:
    - context:
        cluster: default
        namespace: default
        user: default
      name: default
    current-context: default
    users:
    - name: default
      user:
        tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    k8s-app: kube-proxy
  name: kube-proxy
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: kube-proxy
  template:
    metadata:
      labels:
        k8s-app: kube-proxy
    spec:
      tolerations:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
      - effect: NoSchedule
        key: node.cloudprovider.kubernetes.io/uninitialized
        value: "true"
      hostNetwork: true
      restartPolicy: Always
      serviceAccount: kube-proxy
      serviceAccountName: kube-proxy
      containers:
      - name: kube-proxy
        image: registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy-amd64:v1.11.1
        command:
        - /usr/local/bin/kube-proxy
        - --config=/var/lib/kube-proxy/config.conf
        - --v=2
        imagePullPolicy: IfNotPresent
        securityContext:
          privileged: true
        volumeMounts:
        - mountPath: /var/lib/kube-proxy
          name: kube-proxy
        - mountPath: /run/xtables.lock
          name: xtables-lock
        - mountPath: /lib/modules
          name: lib-modules
          readOnly: true
      volumes:
      - configMap:
          defaultMode: 420
          name: kube-proxy
        name: kube-proxy
      - hostPath:
          path: /run/xtables.lock
          type: FileOrCreate
        name: xtables-lock
      - hostPath:
          path: /lib/modules
        name: lib-modules
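  • A minimal sketch of the substitution and deployment, assuming the VIP in group_vars/all.yml is 192.168.0.110 (adjust to your own value):
# fill in the VIP placeholder and apply the manifest on any master
sed -i 's#${https://vip:6443}#https://192.168.0.110:6443#g' kube-proxy.yml
kubectl apply -f kube-proxy.yml
# verify the DaemonSet pods and the IPVS rules they program
kubectl -n kube-system get po -l k8s-app=kube-proxy
ipvsadm -Ln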

Deploying the flannel add-on

  • If your pod Network differs, replace '10.244.0.0/16'
  • On any master, create a file named kube-flannel.yml with the content below
  • kubectl apply -f kube-flannel.yml completes the flannel deployment (a verification sketch follows the manifest)
# kube-flannel.yml
# image: quay.io/coreos/flannel:v0.10.0-amd64
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
rules:
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      hostNetwork: true
      nodeSelector:
        beta.kubernetes.io/arch: amd64
      tolerations:
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.10.0-amd64
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.10.0-amd64
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: true
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
      - name: run
        hostPath:
          path: /run
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
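  • Once kube-proxy and flannel are both running, the nodes should move from NotReady to Ready; a quick check:
kubectl -n kube-system get po -l app=flannel -o wide
kubectl get node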

Deploying CoreDNS

  • The CoreDNS manifest cannot be used directly because your cluster DNS IP must be filled in, so coredns.yaml.sed together with deploy.sh is used to generate the final YAML
  • Create the two files below and give deploy.sh execute permission (chmod +x deploy.sh)
  • Run ./deploy.sh -i ${ClusterDns}, where ClusterDns is the ClusterDns value from group_vars/all.yml (a usage sketch follows the template)
#!/bin/bash
# deploy.sh
# Deploys CoreDNS to a cluster currently running kube-dns.

show_help () {
cat << USAGE
usage: $0 [ -r REVERSE-CIDR ] [ -i DNS-IP ] [ -d CLUSTER-DOMAIN ] [ -t YAML-TEMPLATE ]
    -r : Define a reverse zone for the given CIDR. You may specify this option more
         than once to add multiple reverse zones. If no reverse CIDRs are defined,
         then the default is to handle all reverse zones (i.e. in-addr.arpa and ip6.arpa)
    -i : Specify the cluster DNS IP address. If not specified, the IP address of
         the existing "kube-dns" service is used, if present.
USAGE
exit 0
}

# Simple Defaults
CLUSTER_DOMAIN=cluster.local
YAML_TEMPLATE=`pwd`/coredns.yaml.sed

# Get Opts
while getopts "hr:i:d:t:" opt; do
    case "$opt" in
    h) show_help
       ;;
    r) REVERSE_CIDRS="$REVERSE_CIDRS $OPTARG"
       ;;
    i) CLUSTER_DNS_IP=$OPTARG
       ;;
    d) CLUSTER_DOMAIN=$OPTARG
       ;;
    t) YAML_TEMPLATE=$OPTARG
       ;;
    esac
done

# Conditional Defaults
if [[ -z $REVERSE_CIDRS ]]; then
    REVERSE_CIDRS="in-addr.arpa ip6.arpa"
fi
if [[ -z $CLUSTER_DNS_IP ]]; then
    # Default IP to the existing kube-dns service IP
    CLUSTER_DNS_IP=$(kubectl get service --namespace kube-system kube-dns -o jsonpath="{.spec.clusterIP}")
    if [ $? -ne 0 ]; then
        >&2 echo "Error! The IP address for DNS service couldn't be determined automatically. Please specify the DNS-IP with the '-i' option."
        exit 2
    fi
fi

sed -e s/CLUSTER_DNS_IP/$CLUSTER_DNS_IP/g -e s/CLUSTER_DOMAIN/$CLUSTER_DOMAIN/g -e "s?REVERSE_CIDRS?$REVERSE_CIDRS?g" $YAML_TEMPLATE
#coredns.yaml.sed
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        health
        kubernetes CLUSTER_DOMAIN REVERSE_CIDRS {
          pods insecure
          upstream
          fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        proxy . /etc/resolv.conf
        cache 30
        reload
        loadbalance
    }
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/name: "CoreDNS"
spec:
  replicas: 2
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      serviceAccountName: coredns
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      containers:
      - name: coredns
        image: coredns/coredns:1.2.0
        imagePullPolicy: IfNotPresent
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
      dnsPolicy: Default
      volumes:
      - name: config-volume
        configMap:
          name: coredns
          items:
          - key: Corefile
            path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: CLUSTER_DNS_IP
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
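  • A minimal usage sketch, assuming ClusterDns in group_vars/all.yml is 10.96.0.10 (substitute your own value):
chmod +x deploy.sh
./deploy.sh -i 10.96.0.10 > coredns.yml
kubectl apply -f coredns.yml
# verify in-cluster DNS resolution from a throwaway pod
kubectl run -it --rm dns-test --image=busybox:1.28 --restart=Never -- nslookup kubernetes.default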

Deploying the Dashboard

  • Deploy the dashboard add-on
kubectl create -f https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/recommended/kubernetes-dashboard.yaml
# check the deployment status
kubectl -n kube-system get po,svc -l k8s-app=kubernetes-dashboard
  • Create the open-api ClusterRoleBinding
cat <<EOF | kubectl create -f -
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: open-api
  namespace: ""
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: system:anonymous
EOF
  • Create the dashboard service account
kubectl -n kube-system create sa dashboard
kubectl create clusterrolebinding dashboard --clusterrole cluster-admin --serviceaccount=kube-system:dashboard
  • Get the login token
kubectl -n kube-system describe secrets | sed -rn '/\sdashboard-token-/,/^token/{/^token/s#\S+\s+##p}'
  • Dashboard URL
https://{vip}:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/
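  • Because the open-api binding above grants system:anonymous cluster-admin, the URL can be checked directly from the command line (a quick sanity check, assuming the VIP is 192.168.0.110; -k skips verification of the self-signed certificate):
curl -k https://192.168.0.110:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/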

kubectl auto-completion

yum install -y bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
# to make completion permanent, add it to your shell profile
echo 'source <(kubectl completion bash)' >> $HOME/.bashrc

References