k8s-1.28.0 Single-Master Cluster Deployment

kevin / 2024-03-30

Prerequisites:

Set up passwordless SSH from the admin host to all nodes:

rm -rf ~/.ssh

ssh-keygen

for i in 192.168.122.{4..9};do ssh-copy-id -i ~/.ssh/id_rsa.pub $i;done
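
To confirm passwordless login actually works before continuing, a quick check over the same IP range (BatchMode makes ssh fail instead of prompting for a password):

# Each host should print its hostname without prompting
for i in 192.168.122.{4..9}; do ssh -o BatchMode=yes $i hostname; done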

I. Environment Preparation

1. Shell prompt colors

echo "PS1='\[\033[35m\][\[\033[00m\]\[\033[31m\]\u\[\033[33m\]\[\033[33m\]@\[\033[03m\]\[\033[35m\]\h\[\033[00m\] \[\033[5;32m\]\w\[\033[00m\]\[\033[35m\]]\[\033[00m\]\[\033[5;31m\]\\$\[\033[00m\] '" >> ~/.bashrc && source ~/.bashrc

2. Configure yum repositories

# Tencent mirror repos
cd /etc/yum.repos.d && mkdir bak && mv /etc/yum.repos.d/* /etc/yum.repos.d/bak

wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.cloud.tencent.com/repo/centos7_base.repo
wget -O /etc/yum.repos.d/CentOS-Epel.repo http://mirrors.cloud.tencent.com/repo/epel-7.repo

yum clean all
yum makecache

3. Set hostnames (run each command on its corresponding node)

hostnamectl set-hostname master1 && bash
hostnamectl set-hostname worker1 && bash
hostnamectl set-hostname worker2 && bash
hostnamectl set-hostname worker3 && bash
hostnamectl set-hostname harbor && bash
hostnamectl set-hostname gitlab && bash

4. Add /etc/hosts entries

cat >> /etc/hosts << EOF
192.168.122.4 master1
192.168.122.5 worker1
192.168.122.6 worker2
192.168.122.7 worker3
192.168.122.8 harbor harbor.kevinspace.top
192.168.122.9 gitlab
EOF
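
A quick sanity check that every entry resolves and responds (assuming all six hosts are up):

for i in master1 worker1 worker2 worker3 harbor gitlab; do ping -c 1 -W 1 $i; done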

5. Passwordless SSH across the cluster

ssh-keygen

for i in master1 worker1 worker2 worker3 harbor gitlab;do ssh-copy-id -i .ssh/id_rsa.pub $i;done

6. Synchronize time

yum -y install ntp

systemctl enable ntpd --now
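
To verify synchronization, ntpq should list at least one peer marked with an asterisk (the currently selected time source):

ntpq -p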

7. Permanently disable SELinux (requires a reboot to take effect)

sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config

# Check SELinux status
getenforce

8. Permanently disable swap (requires a reboot to take effect)

sed -i 's/.*swap.*/#&/g' /etc/fstab

# Check swap status
free -h

9. Disable the firewall and flush iptables rules

yum -y install yum-utils ipset ipvsadm tree git iptables-services

systemctl disable firewalld && systemctl stop firewalld

iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X && iptables -P FORWARD ACCEPT && service iptables save

10. Disable NetworkManager

# Do not run this on Huawei Cloud servers
# Disable NetworkManager
systemctl disable NetworkManager && systemctl stop NetworkManager

# To re-enable NetworkManager:
systemctl enable NetworkManager && systemctl start NetworkManager

11. Load IPVS modules

yum -y install ipset ipvsadm

cat > /etc/sysconfig/modules/ipvs.modules <<EOF
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF

modprobe -- nf_conntrack

chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack

12. Enable br_netfilter and IPv4 forwarding

cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF

sudo modprobe overlay
sudo modprobe br_netfilter

# Set the required sysctl parameters; they persist across reboots
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables  = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward                 = 1
EOF

# Apply the sysctl parameters without rebooting
sudo sysctl --system

# Verify the modules and settings took effect
lsmod | grep br_netfilter
lsmod | grep overlay

sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward

13. Kernel tuning

cat > /etc/sysctl.d/99-sysctl.conf << 'EOF'
# sysctl settings are defined through files in
# /usr/lib/sysctl.d/, /run/sysctl.d/, and /etc/sysctl.d/.
#
# Vendors settings live in /usr/lib/sysctl.d/.
# To override a whole file, create a new file with the same name in
# /etc/sysctl.d/ and put new settings there. To override
# only specific settings, add a file with a lexically later
# name in /etc/sysctl.d/ and put new settings there.
#
# For more information, see sysctl.conf(5) and sysctl.d(5).

# Controls IP packet forwarding

# Controls source route verification
net.ipv4.conf.default.rp_filter = 1

# Do not accept source routing
net.ipv4.conf.default.accept_source_route = 0

# Controls the System Request debugging functionality of the kernel

# Controls whether core dumps will append the PID to the core filename.
# Useful for debugging multi-threaded applications.
kernel.core_uses_pid = 1

# Controls the use of TCP syncookies
net.ipv4.tcp_syncookies = 1

# Controls the maximum size of a message, in bytes
kernel.msgmnb = 65536

# Controls the default maximum size of a message queue
kernel.msgmax = 65536

net.ipv4.conf.all.promote_secondaries = 1
net.ipv4.conf.default.promote_secondaries = 1
net.ipv6.neigh.default.gc_thresh3 = 4096

kernel.sysrq = 1
net.ipv6.conf.all.disable_ipv6=0
net.ipv6.conf.default.disable_ipv6=0
net.ipv6.conf.lo.disable_ipv6=0
kernel.numa_balancing = 0
kernel.shmmax = 68719476736
kernel.printk = 5
net.core.rps_sock_flow_entries=8192
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_local_reserved_ports=60001,60002
net.core.rmem_max=16777216
fs.inotify.max_user_watches=524288
kernel.core_pattern=core
net.core.dev_weight_tx_bias=1
net.ipv4.tcp_max_orphans=32768
kernel.pid_max=4194304
kernel.softlockup_panic=1
fs.file-max=3355443
net.core.bpf_jit_harden=1
net.ipv4.tcp_max_tw_buckets=32768
fs.inotify.max_user_instances=8192
net.core.bpf_jit_kallsyms=1
vm.max_map_count=262144
kernel.threads-max=262144
net.core.bpf_jit_enable=1
net.ipv4.tcp_keepalive_time=600
net.ipv4.tcp_wmem=4096 12582912 16777216
net.core.wmem_max=16777216
net.ipv4.neigh.default.gc_thresh1=2048
net.core.somaxconn=32768
net.ipv4.neigh.default.gc_thresh3=8192
net.ipv4.ip_forward=1
net.ipv4.neigh.default.gc_thresh2=4096
net.ipv4.tcp_max_syn_backlog=8096
net.bridge.bridge-nf-call-iptables=1
net.ipv4.tcp_rmem=4096 12582912 16777216
EOF

# Apply the sysctl parameters without rebooting
sudo sysctl --system
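
Spot-check a few of the values to confirm the file was applied:

sysctl net.core.somaxconn vm.max_map_count fs.file-max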

14. Configure resource limits

cat >> /etc/security/limits.conf << 'EOF'
* soft nofile 100001
* hard nofile 100002
root soft nofile 100001
root hard nofile 100002
* soft memlock unlimited
* hard memlock unlimited
* soft nproc 254554
* hard nproc 254554
* soft sigpending 254554
* hard sigpending 254554
EOF
 
grep -vE "^\s*#" /etc/security/limits.conf
 
ulimit -a

15. Upgrade the kernel to 5.4 (kernel-lt; requires a reboot to take effect)

# https://elrepo.org/tiki/kernel-lt
# https://elrepo.org/linux/kernel/el7/x86_64/RPMS/

rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-6.el7.elrepo.noarch.rpm

yum --disablerepo="*" --enablerepo="elrepo-kernel" list available

yum --enablerepo=elrepo-kernel install -y kernel-lt

grub2-set-default 0

# Reboot here so the new kernel is loaded before continuing
reboot
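
After the reboot, confirm the node actually booted into the new kernel:

uname -r   # should now print a 5.4.x (kernel-lt) release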

II. Install containerd-1.6.24 (official repo, all nodes)

wget -O /etc/yum.repos.d/docker-ce.repo https://download.docker.com/linux/centos/docker-ce.repo

yum makecache fast

yum list containerd.io --showduplicates | sort -r

yum -y install containerd.io-1.6.24-3.1.el7
# Generate the default config; the certs.d mirror config added below takes effect without restarting the service
containerd config default | sudo tee /etc/containerd/config.toml

# Change the cgroup driver to systemd
sed -ri 's#SystemdCgroup = false#SystemdCgroup = true#' /etc/containerd/config.toml

# Change the sandbox_image
sed -ri 's#registry.k8s.io\/pause:3.6#registry.aliyuncs.com\/google_containers\/pause:3.9#' /etc/containerd/config.toml

# Add registry mirrors
# https://github.com/DaoCloud/public-image-mirror

# 1. Point containerd at the certs.d config directory
sed -i 's/config_path = ""/config_path = "\/etc\/containerd\/certs.d\/"/g' /etc/containerd/config.toml

# 2. Configure the mirrors
# docker.io mirror
mkdir -p /etc/containerd/certs.d/docker.io
cat > /etc/containerd/certs.d/docker.io/hosts.toml << 'EOF'
server = "https://docker.io" # 源镜像地址

[host."https://xk9ak4u9.mirror.aliyuncs.com"] # 镜像加速地址
  capabilities = ["pull","resolve"]
  
[host."https://dockerproxy.com"] # 镜像加速地址
  capabilities = ["pull", "resolve"]

[host."https://docker.mirrors.ustc.edu.cn"] # 镜像加速地址
  capabilities = ["pull","resolve"]
  
[host."https://registry-1.docker.io"]
  capabilities = ["pull","resolve","push"]
EOF

# registry.k8s.io mirror
mkdir -p /etc/containerd/certs.d/registry.k8s.io
cat > /etc/containerd/certs.d/registry.k8s.io/hosts.toml << 'EOF'
server = "https://registry.k8s.io"

[host."https://k8s.m.daocloud.io"]
  capabilities = ["pull", "resolve", "push"]
EOF

# quay.io mirror
mkdir -p /etc/containerd/certs.d/quay.io
cat > /etc/containerd/certs.d/quay.io/hosts.toml << 'EOF'
server = "https://quay.io"

[host."https://quay.m.daocloud.io"]
  capabilities = ["pull", "resolve", "push"]
EOF

systemctl daemon-reload

systemctl enable containerd --now

systemctl restart containerd
systemctl status containerd
# Configure crictl (use > rather than >> so re-runs don't duplicate entries)
cat << EOF > /etc/crictl.yaml
runtime-endpoint: unix:///var/run/containerd/containerd.sock
image-endpoint: unix:///var/run/containerd/containerd.sock
timeout: 10
debug: false
EOF
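
A quick smoke test of the runtime endpoint and the mirror setup (the pause image matches the sandbox_image set above):

crictl version
crictl pull registry.aliyuncs.com/google_containers/pause:3.9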

Install nerdctl

wget https://github.com/containerd/nerdctl/releases/download/v1.7.2/nerdctl-1.7.2-linux-amd64.tar.gz

tar -xf nerdctl-1.7.2-linux-amd64.tar.gz

mv nerdctl /bin/
nerdctl version
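
Optionally verify the docker.io mirror config with a small test pull (nerdctl reads the same /etc/containerd/certs.d hosts configuration):

nerdctl pull busybox:1.28.0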

Install buildkit

# Download (v0.12.2, matching the verification output below)
wget https://github.com/moby/buildkit/releases/download/v0.12.2/buildkit-v0.12.2.linux-amd64.tar.gz

# Extract (creates ./bin/ in the current directory, /root/bin when run from /root)
tar -zxvf buildkit-v0.12.2.linux-amd64.tar.gz
ln -s /root/bin/buildctl /usr/local/bin/
ln -s /root/bin/buildkitd /usr/local/bin/

# Manage buildkitd with systemd; create the following unit file
cat > /etc/systemd/system/buildkit.service <<EOF
[Unit]
Description=BuildKit
Documentation=https://github.com/moby/buildkit

[Service]
ExecStart=/usr/local/bin/buildkitd --oci-worker=false --containerd-worker=true

[Install]
WantedBy=multi-user.target
EOF

# Start buildkitd
systemctl daemon-reload
systemctl enable buildkit --now
systemctl status buildkit

# Verify nerdctl and buildctl
$ nerdctl version
Client:
 Version:       v1.7.2
 OS/Arch:       linux/amd64
 Git commit:    e32c4b023bf41e5c8325cfb893a53cefb5fc68ed
 buildctl:
  Version:      v0.12.2
  GitCommit:    567a99433ca23402d5e9b9f9124005d2e59b8861

Server:
 containerd:
  Version:      1.6.24
  GitCommit:    61f9fd88f79f081d64d6fa3bb1a0dc71ec870523
 runc:
  Version:      1.1.9
  GitCommit:    v1.1.9-0-gccaecfc
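
With buildkitd running, nerdctl can drive image builds end to end. A minimal sketch (the image name build-test:v1 is just an example):

mkdir -p ~/build-test && cd ~/build-test
cat > Dockerfile << 'EOF'
FROM busybox:1.28.0
CMD ["echo", "buildkit works"]
EOF
nerdctl build -t build-test:v1 .
nerdctl run --rm build-test:v1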

III. Install k8s (kubeadm-1.28.0, kubelet-1.28.0, kubectl-1.28.0) (Tsinghua mirror)

cat > /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=kubernetes
baseurl=https://mirrors.tuna.tsinghua.edu.cn/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
EOF

yum makecache fast

yum -y install kubeadm-1.28.0 kubelet-1.28.0 kubectl-1.28.0


systemctl enable --now kubelet
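
Before initializing, confirm all three components report v1.28.0 (kubelet will restart in a loop until kubeadm init runs, which is expected):

kubeadm version -o short
kubelet --version
kubectl version --client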

IV. Initialize the k8s-1.28.0 cluster

mkdir ~/kubeadm_init && cd ~/kubeadm_init

kubeadm config print init-defaults > kubeadm-init.yaml
cat > ~/kubeadm_init/kubeadm-init.yaml << EOF
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.122.4 # IP of the master performing the init
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  name: master1  # hostname of the master performing the init
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master1
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.28.0  # the k8s version installed above
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.96.0.0/12
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
EOF
# List the required images
kubeadm config images list --config kubeadm-init.yaml
# Pre-pull the images
kubeadm config images pull --config kubeadm-init.yaml
# Initialize the cluster
kubeadm init --config=kubeadm-init.yaml | tee kubeadm-init.log

[root@master1 ~/kubeadm_init]# kubeadm init --config=kubeadm-init.yaml | tee kubeadm-init.log
[init] Using Kubernetes version: v1.28.0
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Using existing ca certificate authority
[certs] Using existing apiserver certificate and key on disk
[certs] Using existing apiserver-kubelet-client certificate and key on disk
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [localhost master1] and IPs [192.168.122.4 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [localhost master1] and IPs [192.168.122.4 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 13.005940 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node master1 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node master1 as control-plane by adding the taints [node-role.kubernetes.io/master1:NoSchedule]
[bootstrap-token] Using token: abcdef.0123456789abcdef
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.122.4:6443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:90b45c478a2e27df28010ac9b293bc0fa5187115a6bf3e0e6aa10a14c6d897a2

If initialization fails, reset and re-run it with the following:

rm -rf $HOME/.kube
kubeadm reset

# Flush iptables rules and clear the IPVS table
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
ipvsadm --clear

# Configure kubectl on the current master node
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

# Join the worker nodes (run on each worker)
kubeadm join 192.168.122.4:6443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:90b45c478a2e27df28010ac9b293bc0fa5187115a6bf3e0e6aa10a14c6d897a2
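
Back on master1, the workers should appear within a minute or so (NotReady is expected until the CNI is installed in the next step):

kubectl get nodes -o wide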

V. Install the k8s cluster network (Calico)

Check the Calico/k8s version compatibility matrix:

https://docs.tigera.io/calico/3.26/getting-started/kubernetes/requirements

For k8s-1.28.0, use Calico v3.26.4 (version compatibility is critical).

mkdir -p ~/calico-yml

cd ~/calico-yml && wget https://github.com/projectcalico/calico/raw/v3.26.4/manifests/calico.yaml
# 1. Change the pod CIDR to match podSubnet (10.244.0.0/16)
sed -i 's/192\.168/10\.244/g' calico.yaml

sed -i 's/# \(- name: CALICO_IPV4POOL_CIDR\)/\1/' calico.yaml

sed -i 's/# \(\s*value: "10.244.0.0\/16"\)/\1/' calico.yaml

# 2. Pin the node interface (eth0 here; substitute whatever NIC your machines have)
sed -i '/value: "k8s,bgp"/a \            - name: IP_AUTODETECTION_METHOD' calico.yaml

sed -i '/- name: IP_AUTODETECTION_METHOD/a \              value: "interface=eth0"' calico.yaml

kubectl apply -f ~/calico-yml/calico.yaml
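
Watch the Calico pods come up and the nodes flip to Ready (image pulls can take a few minutes):

kubectl -n kube-system get pods -l k8s-app=calico-node
kubectl get nodes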

VI. Verify CoreDNS resolution

[root@master1 ~]# kubectl run -it --rm dns-test --image=busybox:1.28.0 sh
If you don't see a command prompt, try pressing enter.
/ # nslookup kubernetes
Server:    10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local   # seeing this means DNS resolution is working

Name:      kubernetes
Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local
/ #

VII. helm, kubens, kubectl completion

helm

cd && wget https://repo.huaweicloud.com/helm/v3.13.2/helm-v3.13.2-linux-amd64.tar.gz

tar xf ~/helm-v3.13.2-linux-amd64.tar.gz

cp ~/linux-amd64/helm /usr/local/sbin/helm

rm -rf ~/helm-v3.13.2-linux-amd64.tar.gz && rm -rf ~/linux-amd64

helm version
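
A quick functional check, adding a public chart repository (bitnami shown purely as an example):

helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo update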

kubectx, kubens

wget -O /usr/local/sbin/kubens https://github.com/ahmetb/kubectx/raw/v0.9.5/kubens

chmod +x /usr/local/sbin/kubens

wget -O /usr/local/sbin/kubectx https://github.com/ahmetb/kubectx/raw/v0.9.5/kubectx

chmod +x /usr/local/sbin/kubectx
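
Typical usage: kubens switches the default namespace of the current context, kubectx switches contexts:

kubens kube-system   # set the default namespace to kube-system
kubens -             # switch back to the previous namespace
kubectx              # list available contexts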

kubectl completion

yum -y install bash-completion

source /etc/profile.d/bash_completion.sh

echo "source <(crictl completion bash)" >> ~/.bashrc
echo "source <(kubectl completion bash)" >> ~/.bashrc
echo "source <(helm completion bash)" >> ~/.bashrc

source ~/.bashrc && su -

Aliases (add to taste)

cat >> ~/.bashrc << 'EOF'
alias pod='kubectl get pod'
alias po='kubectl get pod'
alias svc='kubectl get svc'
alias ns='kubectl get ns'
alias pvc='kubectl get pvc'
alias pv='kubectl get pv'
alias sc='kubectl get sc'
alias ingress='kubectl get ingress'
alias all='kubectl get all'
alias deployment='kubectl get deployments'
alias vs='kubectl get vs'
alias gateway='kubectl get gateway'
EOF

source ~/.bashrc