@@ -1,4 +1,4 @@
## Setting Up a $k8s$ Cluster Using Chinese Mirrors [Version V1.29]
### 1. Preliminary Preparation
@@ -170,49 +170,23 @@ EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack
```
### 2. Deployment and Installation
#### 2.1 Installing $Docker$
Install $Docker$ on all nodes:
```shell
# Add the Docker CE mirror repo (Aliyun)
yum install -y yum-utils
curl -o /etc/yum.repos.d/docker-ce.repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum clean all
yum makecache
```
Install:
```shell
# List the available Docker versions
yum list docker-ce --showduplicates | sort -r
yum list docker-ce-cli --showduplicates | sort -r
yum list containerd.io --showduplicates | sort -r
```
```
docker-ce.x86_64 3:20.10.24-3.el9 docker-ce-stable
...
docker-ce-cli.x86_64 1:20.10.24-3.el9 docker-ce-stable
...
```
```shell
# Install
yum install -y docker-ce docker-ce-cli containerd.io
```
Enable Docker at boot and start it:
```shell
# Enable at boot and start immediately
systemctl enable docker && systemctl start docker
```
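A quick optional sanity check that the daemon is really up, and a preview of the cgroup driver we align in the next step:
```shell
systemctl is-active docker      # should print "active"
docker info | grep -i cgroup    # shows the cgroup driver currently in use
```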
@@ -225,14 +199,19 @@ tee /etc/docker/daemon.json <<-'EOF'
"exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
systemctl daemon-reload
systemctl restart docker
```
> **From k8s v1.24 on, the CRI shim call chain is: kubelet (client) -> CRI shim (built into containerd) -> containerd -> containerd-shim -> runc**
```shell
# https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.8/cri-dockerd-0.3.8.amd64.tgz
# Upload cri-dockerd-0.3.8.amd64.tgz via SFTP
tar xf cri-dockerd-0.3.8.amd64.tgz
mv cri-dockerd/cri-dockerd /usr/bin/
@@ -286,7 +265,7 @@ systemctl enable cri-docker && systemctl start cri-docker && systemctl status cr
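# Optional check that the new CRI endpoint answers. Assumes crictl is
# available (it arrives as cri-tools with the kubeadm packages installed next):
crictl --runtime-endpoint unix:///var/run/cri-dockerd.sock version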
#### 2.2 Installing $kubelet$, $kubeadm$, $kubectl$
Configure the $k8s$ yum repo (all nodes):
```shell
cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
@@ -300,7 +279,7 @@ gpgkey=https://pkgs.k8s.io/core:/stable:/v1.29/rpm/repodata/repomd.xml.key
EOF
```
Install $kubelet$, $kubeadm$, $kubectl$ (all nodes):
```shell
# Install the default versions
@@ -311,7 +290,7 @@ yum -y install kubeadm-1.29.0-150500.1.1 kubelet-1.29.0-150500.1.1 kubectl-1.2
```
Configure the $cgroup$ driver to match $docker$'s (all nodes):
```shell
cp /etc/sysconfig/kubelet{,.bak}
@@ -321,7 +300,7 @@ EOF
systemctl enable kubelet
```
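The hunk above elides the heredoc body; a sketch of what this step conventionally writes, shown here as an assumption rather than taken from the diff:
```shell
# ASSUMED content of the elided heredoc: pin kubelet to the systemd cgroup
# driver so it matches Docker's "native.cgroupdriver=systemd" from earlier.
cat > /etc/sysconfig/kubelet <<EOF
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"
EOF
```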
Install the shell auto-completion tool [optional]:
```shell
yum install bash-completion -y
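# (Assumption: the usual follow-up) wire kubectl into bash-completion on every
# login; `kubectl completion bash` is a standard kubectl subcommand:
echo 'source <(kubectl completion bash)' >> ~/.bashrc
source ~/.bashrc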
@@ -364,6 +343,7 @@ registry.k8s.io/pause 3.9 e6f181688397 14 months ag
**Workaround**
```shell
# These four images can be pulled directly
docker pull calico/kube-controllers:v3.27.0
docker pull calico/cni:v3.27.0
docker pull calico/pod2daemon-flexvol:v3.27.0
@@ -396,13 +376,13 @@ docker tag swr.cn-north-4.myhuaweicloud.com/ddn-k8s/registry.k8s.io/pause:3.9 re
```
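The pull-from-mirror-then-retag pattern scripts naturally. A sketch, seeded only with the pause image confirmed above; the remaining kubeadm images would be appended to the list the same way:
```shell
# Pull each image from the Huawei Cloud mirror, then retag it back to its
# registry.k8s.io name so kubeadm finds it in the local cache.
MIRROR=swr.cn-north-4.myhuaweicloud.com/ddn-k8s
for img in registry.k8s.io/pause:3.9; do
    docker pull "${MIRROR}/${img}"
    docker tag "${MIRROR}/${img}" "${img}"
done
```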
Initialize the cluster (run on the $master$ node):
```shell
# Initialize the cluster
kubeadm init --apiserver-advertise-address 10.10.14.200 --kubernetes-version v1.29.0 --pod-network-cidr=10.244.0.0/16 --cri-socket=unix:///var/run/cri-dockerd.sock
# If the network gives you trouble, use the following init command instead [I did not need it]
# 10.10.14.200 is the master node's IP
kubeadm init \
--apiserver-advertise-address 10.10.14.200 \
--kubernetes-version v1.29.0 \
@@ -413,26 +393,29 @@ kubeadm init \
Record the command below:
```shell
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 10.10.14.200:6443 --token 3yk4me.5k595v6hm2qz463s \
--discovery-token-ca-cert-hash sha256:9e83f5ebfaefa83523e16d546d56b9f3803d4083a71d18fe49217f72306a2058
```
Create the kubeconfig directory ($master$):
```shell
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# Create a permanently valid token
kubeadm token create --ttl 0 --print-join-command
```
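kubectl should answer immediately now; until a network plugin (flannel/calico, below) is installed the nodes report NotReady, which is expected. Output will look roughly like:
```shell
kubectl get nodes
# NAME         STATUS     ROLES           AGE   VERSION
# k8s-master   NotReady   control-plane   1m    v1.29.0
```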
Run the following command on each $node$ to add it to the cluster:
> **Note**: be sure to append --cri-socket unix:///var/run/cri-dockerd.sock to the command returned above
```shell
kubeadm join 10.10.14.200:6443 --token 3yk4me.5k595v6hm2qz463s \
--discovery-token-ca-cert-hash sha256:9e83f5ebfaefa83523e16d546d56b9f3803d4083a71d18fe49217f72306a2058 --cri-socket unix:///var/run/cri-dockerd.sock
```
@@ -453,7 +436,7 @@ kubectl apply -f kube-flannel.yml
Apply the $operator$ manifest:
```shell
# wget https://raw.githubusercontent.com/projectcalico/calico/v3.26.4/manifests/tigera-operator.yaml
kubectl create -f tigera-operator.yaml
```
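The operator lands in its own namespace; a quick check that it came up:
```shell
kubectl get pods -n tigera-operator
```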
@@ -466,7 +449,7 @@ kubectl create -f tigera-operator.yaml
Edit line 13 of the file so it uses the IP range that was passed to kubeadm init --pod-network-cidr
```yaml
vi custom-resources.yaml
11 ipPools:
12 - blockSize: 26
@@ -476,13 +459,13 @@ vi custom-resources.yaml
Apply the resource manifest:
```shell
kubectl apply -f custom-resources.yaml
```
Watch the pods in the calico-system namespace:
```shell
watch kubectl get pods -n calico-system
```
@@ -517,7 +500,7 @@ k8s-node2 Ready <none> 12m v1.29.0
Check that all $pod$s are running normally:
```shell
kubectl get pod -A
NAMESPACE NAME READY STATUS RESTARTS AGE
@@ -551,7 +534,78 @@ tigera-operator tigera-operator-7f8cd97876-7s58q 1/1 Running
Installation directory: /etc/kubernetes/
Component manifest directory: /etc/kubernetes/manifests/
```shell
[root@k8s-master ~]# kubectl get pods -n kube-flannel
NAME READY STATUS RESTARTS AGE
kube-flannel-ds-qf7tg 0/1 CrashLoopBackOff 30 (3m21s ago) 133m
kube-flannel-ds-tlczf 1/1 Running 0 133m
kube-flannel-ds-xn98c 1/1 Running 0 133m
```
One of the pods keeps failing with `CrashLoopBackOff`.
Reference: https://www.cnblogs.com/williamzheng/p/18357226
```shell
[root@k8s-master ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens192: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
# Note the NIC name above: ens192
```
```shell
kubectl edit ds kube-flannel-ds -n kube-flannel
```
![](https://dsideal.obs.cn-north-1.myhuaweicloud.com/HuangHai/BlogImages/202409111853398.png)
```shell
# In the editor, append this flag to the kube-flannel container's args,
# then save and quit with :wq
- --iface=ens192
```
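The same change can be made non-interactively with a JSON patch; a sketch, assuming the kube-flannel container sits at index 0 of the DaemonSet's containers (true for the stock manifest):
```shell
# Append --iface=ens192 to the flannel container's args without an editor
kubectl -n kube-flannel patch ds kube-flannel-ds --type=json \
  -p='[{"op":"add","path":"/spec/template/spec/containers/0/args/-","value":"--iface=ens192"}]'
```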
Check again:
```shell
kubectl get pod -A
NAMESPACE NAME READY STATUS RESTARTS AGE
calico-apiserver calico-apiserver-6bb6cf484f-48c57 1/1 Running 0 136m
calico-apiserver calico-apiserver-6bb6cf484f-xlbh4 1/1 Running 0 136m
calico-system calico-kube-controllers-7bc767bbcb-pxppk 1/1 Running 0 142m
calico-system calico-node-6cc8l 1/1 Running 0 142m
calico-system calico-node-vkvjz 1/1 Running 0 142m
calico-system calico-node-wvk6q 1/1 Running 0 142m
calico-system calico-typha-74545574b-6jpgq 1/1 Running 0 142m
calico-system calico-typha-74545574b-vx9kv 1/1 Running 0 142m
calico-system csi-node-driver-7pxtt 2/2 Running 0 142m
calico-system csi-node-driver-lflc6 2/2 Running 0 142m
calico-system csi-node-driver-r5npp 2/2 Running 0 142m
default web-76fd95c67-ckvcn 1/1 Running 0 134m
default web-76fd95c67-zl9dz 1/1 Running 0 134m
kube-flannel kube-flannel-ds-48wgr 1/1 Running 0 3m32s
kube-flannel kube-flannel-ds-9fmfz 1/1 Running 0 3m38s
kube-flannel kube-flannel-ds-nfhhb 1/1 Running 0 3m39s
kube-system coredns-76f75df574-q6vps 1/1 Running 0 152m
kube-system coredns-76f75df574-srxnf 1/1 Running 0 152m
kube-system etcd-k8s-master 1/1 Running 0 152m
kube-system kube-apiserver-k8s-master 1/1 Running 0 152m
kube-system kube-controller-manager-k8s-master 1/1 Running 0 152m
kube-system kube-proxy-8t78q 1/1 Running 0 149m
kube-system kube-proxy-glwfx 1/1 Running 0 152m
kube-system kube-proxy-qg4t7 1/1 Running 0 149m
kube-system kube-scheduler-k8s-master 1/1 Running 0 152m
tigera-operator tigera-operator-7f8cd97876-7s58q 1/1 Running 0 145m
```
Everything is finally running normally now.
### 3. Testing
@@ -569,10 +623,10 @@ kubectl expose deployment web --port=80 --type=NodePort
service/web exposed
```
Check the $pod$ and service status:
```shell
kubectl get pod,svc
NAME READY STATUS RESTARTS AGE
pod/web-76fd95c67-ckvcn 0/1 ContainerCreating 0 23s
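# (Assumed follow-up) once the pod is Running, the service answers on any
# node IP at the NodePort reported by `kubectl get svc web` (assigned from
# the 30000-32767 range); substitute it for <nodeport> below:
curl http://10.10.14.200:<nodeport>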

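# The matching hunk in custom-resources.yaml follows: Calico's IP pool must
# equal the --pod-network-cidr passed to kubeadm init.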
@@ -10,7 +10,7 @@ spec:
# Note: The ipPools section cannot be modified post-install.
ipPools:
- blockSize: 26
cidr: 10.244.0.0/16
encapsulation: VXLANCrossSubnet
natOutgoing: Enabled
nodeSelector: all()
