Kubekey部署k8s

各种教程·supo的文章 · 昨天 · 14 人浏览

环境

系统环境

OS: openEuler 24.03 (LTS)

用户: root下

预部署K8s版本:v1.32.7

部署工具:kubekey 版本4.0.3

主机信息

主机名 IP 说明
k8s-master01 192.168.20.161 控制节点+工作节点+etcd
k8s-master02 192.168.20.162 控制节点+工作节点+etcd
k8s-master03 192.168.20.163 控制节点+工作节点+etcd
k8s-node1 192.168.20.164 工作节点
k8s-node2 192.168.20.165 工作节点

配置

关闭selinux

# Permanently disable SELinux (the config change only applies after reboot);
# setenforce 0 switches to permissive mode immediately so the remaining
# setup steps are not blocked before the reboot at the end.
setenforce 0 || true
sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config

关闭防火墙

# Stop firewalld now and keep it from starting on boot (equivalent to
# running `systemctl stop` followed by `systemctl disable`).
systemctl disable --now firewalld

安装依赖

# -y makes yum non-interactive so these steps can be scripted end-to-end;
# without it `yum update` stops and waits for a confirmation prompt.
yum update -y
yum install -y epel-release
# socat/conntrack/ebtables/ipset are hard requirements for kubekey —
# the deployment aborts with an error if any of them is missing.
yum install -y socat conntrack ebtables ipset curl ipvsadm tar

软件包必须安装,若不安装部署会报错

关闭swap分区

# Turn off all swap for the current boot and discourage swapping.
# NOTE(review): `sysctl -w` is not persistent across reboot — add
# vm.swappiness=0 to /etc/sysctl.d/ if it must survive the reboot below.
swapoff -a && sysctl -w vm.swappiness=0

注释或删除 /etc/fstab 中的 swap 挂载条目(例如执行 sed -ri 's/.*swap.*/#&/' /etc/fstab),避免重启后 swap 再次启用;若存在单独的 swapfile 文件也可一并删除

设置时间同步

# Point chrony at the local NTP servers. (Original had "vi etc/chrony.conf",
# which is a relative path — the file lives at /etc/chrony.conf.)
vi /etc/chrony.conf

server xxx.xxx.xxx.xxx iburst
server xxx.xxx.xxx.xxx iburst

systemctl enable chronyd
systemctl restart chronyd
# Verify the configured sources are reachable and being used
chronyc sources -v

配置limit

# Raise the soft and hard open-file limit for the current shell session
ulimit -SHn 65535
vim /etc/security/limits.conf
# Press Shift+G to jump to the end of the file, then append the following
* soft nofile 65536
* hard nofile 131072
* soft nproc 65535
# NOTE(review): 655350 looks like a possible typo (extra trailing 0) — confirm intent
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited

优化内核

vim /etc/sysctl.d/k8s.conf
## Append the following kernel parameters required by Kubernetes.
## NOTE: the net.bridge.* keys need the br_netfilter module loaded
## (modprobe br_netfilter), otherwise `sysctl --system` reports errors.
## Removed from the original list: a duplicate net.ipv4.tcp_max_syn_backlog
## entry, and the obsolete net.ipv4.ip_conntrack_max key — the current
## equivalent, net.netfilter.nf_conntrack_max, is already set below.
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
net.ipv4.conf.all.route_localnet = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384

生效并重启

# Load every sysctl drop-in file, then reboot so the SELinux and
# limits.conf changes made above also take effect
sysctl --system && reboot

部署

下载kubekey

# KKZONE=cn makes the installer download kubekey from the China mirror
export KKZONE=cn
# Fetch and run the kubekey install script; it drops a ./kk binary in the CWD
curl -sfL https://get-kk.kubesphere.io | sh -

生成配置示例

sudo chmod +x kk
# The version must carry the leading "v" (kubekey expects e.g. v1.32.7,
# matching version: "v1.32.7" in the generated config-sample.yaml)
./kk create config --with-kubernetes v1.32.7

修改配置示例

vi config-sample.yaml
参考配置

apiVersion: kubekey.kubesphere.io/v1alpha2
kind: Cluster
metadata:
  name: sample
spec:
  hosts:
  - {name: master01, address: 192.168.20.161, internalAddress: 192.168.20.161, user: root, password: "xxxxxx"}
  - {name: master02, address: 192.168.20.162, internalAddress: 192.168.20.162, user: root, password: "xxxxxx"}
  - {name: master03, address: 192.168.20.163, internalAddress: 192.168.20.163, user: root, password: "xxxxxx"}
  - {name: node1, address: 192.168.20.164, internalAddress: 192.168.20.164, user: root, password: "xxxxxx"}
  - {name: node2, address: 192.168.20.165, internalAddress: 192.168.20.165, user: root, password: "xxxxxx"}
  roleGroups:
    etcd:
    - master01
    - master02
    - master03
    control-plane:
    - master01
    - master02
    - master03
    worker:
    - master01
    - master02
    - master03
    - node1
    - node2
  controlPlaneEndpoint:
    domain: lb.supo.local
    address: ""
    port: 6443
  kubernetes:
    version: "v1.32.7"
    clusterName: cluster.local
    autoRenewCerts: true
    containerManager: containerd
  etcd:
    type: kubekey
  network:
    plugin: calico
    kubePodsCIDR: 10.233.64.0/18
    kubeServiceCIDR: 10.233.0.0/18
    ## multus support. https://github.com/k8snetworkplumbingwg/multus-cni
    multusCNI:
      enabled: false
  registry:
    privateRegistry: ""
    namespaceOverride: ""
    registryMirrors: []
    insecureRegistries: []
  addons: []

创建k8s集群

 ./kk create cluster -f config-sample.yaml

安装成功显示:Pipeline[CreateClusterPipeline] execute successfully

验证参考

[root@master01 ~]# kubectl get pod -A

NAMESPACE NAME READY STATUS RESTARTS AGE

kube-system calico-kube-controllers-678fc69664-k4n66 1/1 Running 0 87m

kube-system calico-node-7mmdq 1/1 Running 0 87m

kube-system calico-node-dpp85 1/1 Running 0 87m

kube-system calico-node-hqpkd 1/1 Running 0 87m

kube-system calico-node-kgcxq 1/1 Running 0 87m

kube-system calico-node-wtscj 1/1 Running 0 87m

kube-system coredns-6cd47c4b49-4bb7j 1/1 Running 0 87m

kube-system coredns-6cd47c4b49-9z6n4 1/1 Running 0 87m

kube-system kube-apiserver-master01 1/1 Running 0 87m

kube-system kube-apiserver-master02 1/1 Running 0 87m

kube-system kube-controller-manager-master01 1/1 Running 1 (84m ago) 87m

kube-system kube-controller-manager-master02 1/1 Running 0 87m

kube-system kube-proxy-58b4t 1/1 Running 0 87m

kube-system kube-proxy-7fwgh 1/1 Running 0 87m

kube-system kube-proxy-7vggv 1/1 Running 0 87m

kube-system kube-proxy-cfgt6 1/1 Running 0 87m

kube-system kube-proxy-n5kwv 1/1 Running 0 87m

kube-system kube-scheduler-master01 1/1 Running 1 (84m ago) 87m

kube-system kube-scheduler-master02 1/1 Running 0 87m

kube-system nodelocaldns-5chqt 1/1 Running 0 87m

kube-system nodelocaldns-bm2px 1/1 Running 0 87m

kube-system nodelocaldns-fpvvw 1/1 Running 0 87m

kube-system nodelocaldns-kxkxl 1/1 Running 0 87m

kube-system nodelocaldns-z54nz 1/1 Running 0 87m

[root@master01 ~]# kubectl get node

NAME STATUS ROLES AGE VERSION

master01 Ready control-plane,worker 87m v1.32.7

master02 Ready control-plane,worker 87m v1.32.7

master03 Ready control-plane,worker 87m v1.32.7

node1 Ready worker 87m v1.32.7

node2 Ready worker 87m v1.32.7

k8s
Theme Jasmine by Kent Liao