准备环境

# Set this machine's hostname.
# NOTE(review): Kubernetes node names must be lowercase (RFC 1123); kubeadm
# lowercases this to "openeuler.master01", which is what kubeadm.yaml uses later.
hostnamectl set-hostname openEuler.master01

dnf update -y
dnf install -y ntp vim
# NOTE: rebooting here interrupts the session; run the remaining steps after
# reconnecting (this file is a run-book of commands, not a single script).
reboot
# Name resolution for both cluster nodes
cat >> /etc/hosts <<EOF
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.137.36 openEuler.master01
192.168.137.37 openEuler.node01
EOF
# Disable the firewall (kubeadm expects open node-to-node traffic)
systemctl stop firewalld
systemctl disable firewalld
firewall-cmd --state
# Disable SELinux: temporarily now, permanently (config file) after reboot
setenforce 0
sed -ri 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
# Disable swap, both immediately and permanently in fstab (kubelet refuses swap)
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab
# NOTE(review): "crontab -e" is interactive and does nothing by itself;
# presumably meant to add an "@reboot swapoff -a" entry -- confirm intent.
crontab -e
# Enable kernel IP forwarding (in-place edit of the default sysctl file)
sed -i 's/net.ipv4.ip_forward=0/net.ipv4.ip_forward=1/g' /etc/sysctl.conf
# Bridge-filtering and forwarding parameters required by Kubernetes.
# net.ipv4.ip_forward is also set here so it takes effect even when
# /etc/sysctl.conf has no existing "net.ipv4.ip_forward=0" line for the
# sed above to rewrite.
cat <<EOF >/etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness = 0
EOF
# Have the overlay/br_netfilter modules loaded automatically at boot
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
# Load the modules now; br_netfilter must be loaded BEFORE applying the
# net.bridge.* sysctls, otherwise "sysctl -p" reports unknown keys.
modprobe br_netfilter
modprobe overlay
# Verify; expected output similar to: br_netfilter           22256  0
lsmod | grep br_netfilter
# Apply /etc/sysctl.conf (alternatively "sysctl --system" applies everything)
sysctl -p
# Apply the newly added file
sysctl -p /etc/sysctl.d/k8s.conf
# Install ipset and ipvsadm (userspace tools for kube-proxy IPVS mode)
yum -y install ipset ipvsadm
# Script that loads the IPVS kernel modules. It is executed directly later
# (chmod 755 && run), so give it an explicit interpreter line.
cat > /etc/sysconfig/modules/ipvs.module <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_sh
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- nf_conntrack
EOF

安装docker-ce

# Make the module-load script executable and run it
chmod 755 /etc/sysconfig/modules/ipvs.module &&  /etc/sysconfig/modules/ipvs.module
# Verify the modules loaded. On kernels >= 4.19 (openEuler) the conntrack
# module is named nf_conntrack, not nf_conntrack_ipv4 -- grepping for the
# old name never matches even though the module is loaded.
lsmod | grep -e ip_vs -e nf_conntrack


# Dependencies for docker's devicemapper storage support
yum install -y  device-mapper-persistent-data lvm2
# Add the docker yum repos. Use the "arch" command to check the CPU
# architecture and replace x86_64 below if it differs. The heredoc body is
# written verbatim into the repo file, so no comments are added inside it.
cat > /etc/yum.repos.d/docker-ce.repo <<EOF
[centos-extras]
name=Centos extras - x86_64
baseurl=http://mirror.centos.org/centos/7/extras/x86_64
enabled=1
gpgcheck=0
[docker-ce-stable]
name=Docker CE Stable - x86_64
baseurl=https://repo.huaweicloud.com/docker-ce/linux/centos/7/x86_64/stable
enabled=1
gpgcheck=1
gpgkey=https://repo.huaweicloud.com/docker-ce/linux/centos/gpg
[docker-ce-stable-debuginfo]
name=Docker CE Stable - Debuginfo x86_64
baseurl=https://repo.huaweicloud.com/docker-ce/linux/centos/7/debug-x86_64/stable
enabled=0
gpgcheck=1
gpgkey=https://repo.huaweicloud.com/docker-ce/linux/centos/gpg
[docker-ce-stable-source]
name=Docker CE Stable - Sources
baseurl=https://repo.huaweicloud.com/docker-ce/linux/centos/7/source/stable
enabled=0
gpgcheck=1
gpgkey=https://repo.huaweicloud.com/docker-ce/linux/centos/gpg
[docker-ce-test]
name=Docker CE Test - x86_64
baseurl=https://repo.huaweicloud.com/docker-ce/linux/centos/7/x86_64/test
enabled=0
gpgcheck=1
gpgkey=https://repo.huaweicloud.com/docker-ce/linux/centos/gpg
[docker-ce-test-debuginfo]
name=Docker CE Test - Debuginfo x86_64
baseurl=https://repo.huaweicloud.com/docker-ce/linux/centos/7/debug-x86_64/test
enabled=0
gpgcheck=1
gpgkey=https://repo.huaweicloud.com/docker-ce/linux/centos/gpg
[docker-ce-test-source]
name=Docker CE Test - Sources
baseurl=https://repo.huaweicloud.com/docker-ce/linux/centos/7/source/test
enabled=0
gpgcheck=1
gpgkey=https://repo.huaweicloud.com/docker-ce/linux/centos/gpg
[docker-ce-nightly]
name=Docker CE Nightly - x86_64
baseurl=https://repo.huaweicloud.com/docker-ce/linux/centos/7/x86_64/nightly
enabled=0
gpgcheck=1
gpgkey=https://repo.huaweicloud.com/docker-ce/linux/centos/gpg
[docker-ce-nightly-debuginfo]
name=Docker CE Nightly - Debuginfo x86_64
baseurl=https://repo.huaweicloud.com/docker-ce/linux/centos/7/debug-x86_64/nightly
enabled=0
gpgcheck=1
gpgkey=https://repo.huaweicloud.com/docker-ce/linux/centos/gpg
[docker-ce-nightly-source]
name=Docker CE Nightly - Sources
baseurl=https://repo.huaweicloud.com/docker-ce/linux/centos/7/source/nightly
enabled=0
gpgcheck=1
gpgkey=https://repo.huaweicloud.com/docker-ce/linux/centos/gpg
EOF




# Refresh the yum metadata cache
yum makecache
# List available versions. The package names are docker-ce and containerd.io
# (the original "container.io"/"Containerd.io" were typos and matched nothing).
yum list docker-ce --showduplicates | sort -r
yum list containerd.io --showduplicates | sort -r
cd /root/
yum install -y docker-ce-20.10.9 docker-ce-cli-20.10.9 containerd.io docker-compose-plugin
# -p: succeed even if the directory already exists
mkdir -p /etc/docker/

# Docker daemon configuration: data location, registry mirror, and the
# systemd cgroup driver (must match kubelet's --cgroup-driver=systemd).
# The redirection creates daemon.json itself; a separate "touch" is not needed.
cat <<"EOF" > /etc/docker/daemon.json
{
  "data-root": "/data/docker",
  "registry-mirrors" : [
    "https://nao1uw63.mirror.aliyuncs.com"
  ],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF

# Enable docker at boot and start it now
systemctl enable docker --now
# Basic info; verify "Cgroup Driver: systemd" in the output
docker info

安装cri-dockerd

# Fetch the cri-dockerd release, unpack it, and install the binary with
# execute permission in a single step.
wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.10/cri-dockerd-0.3.10.amd64.tgz
tar -zxvf cri-dockerd-0.3.10.amd64.tgz
install -m 0755 cri-dockerd/cri-dockerd /usr/bin/cri-dockerd

# 配置启动文件
cat <<"EOF" > /usr/lib/systemd/system/cri-docker.service
[Unit]
Description=CRI Interface for Docker Application Container Engine
Documentation=https://docs.mirantis.com
After=network-online.target firewalld.service docker.service
Wants=network-online.target
Requires=cri-docker.socket
[Service]
Type=notify
ExecStart=/usr/bin/cri-dockerd --network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.9
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process
[Install]
WantedBy=multi-user.target
EOF

# 生成socket 文件
cat <<"EOF" > /usr/lib/systemd/system/cri-docker.socket
[Unit]
Description=CRI Docker Socket for the API
PartOf=cri-docker.service
[Socket]
ListenStream=%t/cri-dockerd.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker
[Install]
WantedBy=sockets.target
EOF


# Reload unit files, then enable + start cri-docker in one step and
# confirm it is running.
systemctl daemon-reload
systemctl enable --now cri-docker
systemctl is-active cri-docker

安装kubelet

# Kubernetes yum repo (aliyun mirror of the legacy el7 community repo).
# NOTE(review): gpgcheck/repo_gpgcheck are disabled even though gpgkey is
# listed; enable them if package signature verification is required.
cat >/etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF


# Rebuild the yum cache from scratch
yum clean all && yum makecache
# Show every version available in the repository
yum list kubeadm kubelet kubectl --showduplicates | sort -r
# Pin all three components to the same release
yum install -y kubeadm-1.23.9 kubelet-1.23.9 kubectl-1.23.9

# Keep kubelet's cgroup driver consistent with docker's (both systemd)
cat <<EOF > /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"
EOF

# Enable kubelet at boot. With no generated config yet it stays in a
# retry loop and only becomes active after "kubeadm init"/"join".
systemctl enable kubelet --now

# At this point kubelet's status is "activating", not "active"
dnf install conntrack -y

reboot

部署Master节点(只在master节点操作)

配置文件部署

也可以使用下面的配置文件形式进行部署Master

# Generate a default kubeadm.yaml (the original comment claimed the file was
# generated but no command produced it), edit it as shown below, then init.
kubeadm config print init-defaults > kubeadm.yaml
# After editing kubeadm.yaml, initialize the control plane:
kubeadm init --config kubeadm.yaml


# Edit the configuration file (shown with the required changes)
[root@openeuler ~]# cat kubeadm.yaml 
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.137.36     # set to the Master node's IP
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/cri-dockerd.sock   # use the cri-dockerd socket installed above; /var/run/dockershim.sock would bypass cri-dockerd
  name: openeuler.master01            # set to the Master's hostname (lowercase)
  imagePullPolicy: IfNotPresent
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}    # v1beta3 removed the "type" field; CoreDNS is the only supported DNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers  # use a domestic mirror
kind: ClusterConfiguration
kubernetesVersion: 1.23.9       # pin the Kubernetes version
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16           # pod CIDR; must match the CNI config (Calico below)
  serviceSubnet: 10.96.0.0/12
scheduler: {}
# Make the admin kubeconfig usable by the current user
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# NOTE(review): this export overrides the per-user config just set up and is
# not persistent across shells; normally you use one approach or the other.
export KUBECONFIG=/etc/kubernetes/admin.conf

新增Master节点(不做多Master节点略)

# Run the following two commands on the existing master
kubeadm init phase upload-certs --upload-certs
kubeadm token create --print-join-command
# Example output: kubeadm join 172.16.64.2:6443 --token xhsmiv.ggj00ojs6dvv8b23     --discovery-token-ca-cert-hash sha256:5211bd42a2e81b933b52ec83686f93ae6212542d22d00c621fad20f0dc9592b4

# Notes:
# * Do not use --experimental-control-plane; it errors out.
# * Add --control-plane --certificate-key, otherwise the node joins as a worker.
# * Do not pre-deploy on the joining node; run "kubeadm reset" first if you did.
# * With docker + cri-dockerd, join/reset MUST pass
#   --cri-socket unix:///var/run/cri-dockerd.sock (absolute path needs three
#   slashes; "unix://var/..." is a relative path and is wrong).
# NOTE(review): the address below should point at the existing control plane /
# controlPlaneEndpoint (the master above is 192.168.137.36) -- verify.
kubeadm join 192.168.137.37:6443 --token xhsmiv.ggj00ojs6dvv8b23 \
        --discovery-token-ca-cert-hash sha256:5211bd42a2e81b933b52ec83686f93ae6212542d22d00c621fad20f0dc9592b4 \
    --control-plane --certificate-key  5d817a5480c54bb079eab4f7b75b4dfe21bd36e059dfb46bf39f724adb3349aa \
    --cri-socket unix:///var/run/cri-dockerd.sock

如遇下面错误

[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
error execution phase preflight: 
One or more conditions for hosting a new control plane instance is not satisfied.

unable to add a new control plane instance to a cluster that doesn't have a stable controlPlaneEndpoint address

Please ensure that:
* The cluster has a stable controlPlaneEndpoint address.
* The certificates that must be shared among control plane instances are provided.


To see the stack trace of this error execute with --v=5 or higher

解决办法

# Inspect the kubeadm-config ConfigMap
kubectl -n kube-system get cm kubeadm-config -oyaml
# It contains no controlPlaneEndpoint

# Add controlPlaneEndpoint
kubectl -n kube-system edit cm kubeadm-config

kind: ClusterConfiguration
kubernetesVersion: v1.23.9
controlPlaneEndpoint: 192.168.137.37:6443 # add this line
# NOTE(review): this should normally be a VIP/load-balancer or the existing
# master (192.168.137.36), not the joining node's own address -- confirm.
# Then re-run the kubeadm join command on the node being added as a master

部署节点

# Set the worker node's hostname.
# NOTE(review): Kubernetes node names must be lowercase (RFC 1123); kubeadm
# lowercases this to "openeuler.node01".
hostnamectl set-hostname openEuler.node01

dnf update -y
dnf install -y ntp vim
# NOTE: rebooting here interrupts the session; run the remaining steps after
# reconnecting (this file is a run-book of commands, not a single script).
reboot

# Name resolution for both cluster nodes
cat >> /etc/hosts <<EOF
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.137.36 openEuler.master01
192.168.137.37 openEuler.node01
EOF


# Disable the firewall (kubeadm expects open node-to-node traffic)
systemctl stop firewalld
systemctl disable firewalld
firewall-cmd --state
# Disable SELinux: temporarily now, permanently (config file) after reboot
setenforce 0
sed -ri 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
# Disable swap, both immediately and permanently in fstab (kubelet refuses swap)
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab
# NOTE(review): "crontab -e" is interactive and does nothing by itself;
# presumably meant to add an "@reboot swapoff -a" entry -- confirm intent.
crontab -e
# Enable kernel IP forwarding (in-place edit of the default sysctl file)
sed -i 's/net.ipv4.ip_forward=0/net.ipv4.ip_forward=1/g' /etc/sysctl.conf
# Bridge-filtering and forwarding parameters required by Kubernetes.
# net.ipv4.ip_forward is also set here so it takes effect even when
# /etc/sysctl.conf has no existing "net.ipv4.ip_forward=0" line for the
# sed above to rewrite.
cat <<EOF >/etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness = 0
EOF
# Have the overlay/br_netfilter modules loaded automatically at boot
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
# Load the modules now; br_netfilter must be loaded BEFORE applying the
# net.bridge.* sysctls, otherwise "sysctl -p" reports unknown keys.
modprobe br_netfilter
modprobe overlay
# Verify; expected output similar to: br_netfilter           22256  0
lsmod | grep br_netfilter
# Apply /etc/sysctl.conf (alternatively "sysctl --system" applies everything)
sysctl -p
# Apply the newly added file
sysctl -p /etc/sysctl.d/k8s.conf
# Install ipset and ipvsadm (userspace tools for kube-proxy IPVS mode)
yum -y install ipset ipvsadm
# Script that loads the IPVS kernel modules. It is executed directly
# (chmod 755 && run, as on the master), so give it an interpreter line.
cat > /etc/sysconfig/modules/ipvs.module <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_sh
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- nf_conntrack
EOF

安装docker-ce/cri-dockerd/kubelet

按照Master节点上安装docker-ce、cri-dockerd、kubelet

加入节点

# On the master, print the join command
[root@openEuler ~]# kubeadm token create --print-join-command
kubeadm join 192.168.137.36:6443 --token 97360t.ujup6gorhwe4s15k --discovery-token-ca-cert-hash sha256:99d18676c44537d333563adba90176bb7b758cc224611778985fc882317c7363 

# Run the printed command on the node. With docker + cri-dockerd the
# --cri-socket flag is mandatory (see the note in the master-join section);
# the original command omitted it and would fail/pick the wrong runtime.
kubeadm join 192.168.137.36:6443 --token 97360t.ujup6gorhwe4s15k --discovery-token-ca-cert-hash sha256:99d18676c44537d333563adba90176bb7b758cc224611778985fc882317c7363 --cri-socket unix:///var/run/cri-dockerd.sock

部署Calico网络插件

github: https://github.com/projectcalico/calico

切换到需要的版本v3.23.1

https://github.com/projectcalico/calico/releases?expanded=true&page=4&q=v3.23.1

# Fetch and unpack the Calico v3.23.1 release bundle
wget https://github.com/projectcalico/calico/releases/download/v3.23.1/release-v3.23.1.tgz

tar -zxf release-v3.23.1.tgz

cd release-v3.23.1/images

# Preload the Calico images into docker
for image in calico-cni calico-node calico-kube-controllers; do
  docker load -i "${image}.tar"
done


# NOTE(review): calico.yaml ships in the release's manifests/ directory, not
# under images/ -- you likely need "cd ../manifests" first; confirm.
vim  calico.yaml 

# Find CALICO_IPV4POOL_CIDR, uncomment it, and set it to the pod CIDR
# chosen at "kubeadm init" (podSubnet)
................
            - name: CALICO_IPV4POOL_CIDR
              value: "10.244.0.0/16"
................

kubectl apply -f calico.yaml

安装kubectl自动提示

# Enable kubectl tab completion for current and future shells
yum install -y bash-completion
source /usr/share/bash-completion/bash_completion
echo "source <(kubectl completion bash)" >> ~/.bashrc
source <(kubectl completion bash)

安装Kuboard

# Run Kuboard. The container's port 80 is published on host port 88, so the
# endpoint that agents use to reach Kuboard must be port 88 (the original
# ":80" pointed at a port that is not published on the host).
docker run -d \
  --restart=unless-stopped \
  --name=kuboard \
  -p 88:80/tcp \
  -p 10081:10081/tcp \
  -e KUBOARD_ENDPOINT="http://192.168.137.36:88" \
  -e KUBOARD_AGENT_SERVER_TCP_PORT="10081" \
  -v /data/kuboard-data:/data \
  swr.cn-east-2.myhuaweicloud.com/kuboard/kuboard:v3

安装StorageClass(Longhorn)

安装Longhorn存储

# Run on every node: Longhorn needs open-iscsi and an NFS client.
yum --setopt=tsflags=noscripts install -y iscsi-initiator-utils
echo "InitiatorName=$(/sbin/iscsi-iname)" > /etc/iscsi/initiatorname.iscsi
systemctl enable --now iscsid
yum install -y nfs-utils

modprobe iscsi_tcp

# Run Longhorn's environment check from the master
dnf install -y jq
# NOTE(review): piping a remote script straight into bash executes unreviewed
# code; download and inspect it first if policy requires.
curl -sSfL https://raw.githubusercontent.com/longhorn/longhorn/v1.6.0/scripts/environment_check.sh | bash


# Install Longhorn and watch the pods come up
kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/v1.6.0/deploy/longhorn.yaml
kubectl get pods --namespace longhorn-system --watch

卸载Longhorn

# Deploy the uninstall job, wait for it to complete, then remove Longhorn
# and finally the uninstaller itself.
longhorn_base="https://raw.githubusercontent.com/longhorn/longhorn/v1.6.0"
kubectl create -f "${longhorn_base}/uninstall/uninstall.yaml"
kubectl get job/longhorn-uninstall -n longhorn-system -w
kubectl delete -f "${longhorn_base}/deploy/longhorn.yaml"
kubectl delete -f "${longhorn_base}/uninstall/uninstall.yaml"

FAQ

MountVolume.SetUp failed for volume "config-volume" : object "kube-system"/"coredns" not registered

Events:
  Type     Reason       Age               From               Message
  ----     ------       ----              ----               -------
  Normal   Scheduled    40s               default-scheduler  Successfully assigned kube-system/coredns-6d8c4cb4d-bdwcp to openeuler.master01
  Warning  FailedMount  9s (x7 over 40s)  kubelet            MountVolume.SetUp failed for volume "config-volume" : object "kube-system"/"coredns" not registered

解决方案:

kubeadm reset

使用配置文件重新部署

results matching ""

    No results matching ""