1. Install Ubuntu on VMware
(omitted)
2. Install Docker
2.1 Use a domestic (China) Docker registry mirror
$ cat /etc/docker/daemon.json
{
"exec-opts" : ["native.cgroupdriver=systemd"],
"registry-mirrors": [
"https://18lpui8e.mirror.aliyuncs.com",
"https://docker.unsee.tech",
"https://dockerpull.org"
]
}
apt install -y docker.io
systemctl restart docker
systemctl status docker
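A quick sanity check that the daemon actually picked up the settings from daemon.json (the labels below come from standard docker info output):
# should report "Cgroup Driver: systemd" and list the mirrors configured above
docker info | grep -i "cgroup driver"
docker info | grep -A 3 -i "registry mirrors"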
2.2 Disable swap
swapoff -a
sed -i '/swap/ s/^\(.*\)$/#\1/g' /etc/fstab
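To confirm swap is really off (both are standard util-linux/procps tools):
# no output from swapon and 0B swap in free means swap is disabled
swapon --show
free -h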
2.3 Load kernel modules
# Load the following kernel modules with modprobe
modprobe overlay
modprobe br_netfilter
# Make these modules load automatically on boot
tee /etc/modules-load.d/k8s.conf <<EOF
overlay
br_netfilter
EOF
# Set the required kernel parameters: create a config file and load it with sysctl.
tee /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
sysctl --system
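A quick check that the modules are loaded and the parameters took effect:
lsmod | grep -E 'overlay|br_netfilter'
sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward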
2.4 Set the hostname and configure /etc/hosts
# On worker nodes, use the corresponding node name instead
hostnamectl hostname ubuntu-master
# Edit /etc/hosts and append the following lines:
192.168.237.128 ubuntu-master
192.168.237.129 ubuntu-node1
192.168.237.130 ubuntu-node2
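Optionally verify that the names resolve and the machines can reach each other (hostnames/IPs as listed above; run from the master):
ping -c 2 ubuntu-node1
ping -c 2 ubuntu-node2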
2.5 Install kubeadm
Use the Aliyun apt mirror:
apt install -y apt-transport-https ca-certificates curl
curl https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | sudo apt-key add -
cat <<EOF | tee /etc/apt/sources.list.d/kubernetes.list
deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main
EOF
apt update
Install 1.23 or an earlier release; 1.24 and later removed dockershim and no longer support Docker as the container runtime.
apt install -y kubeadm=1.23.3-00 kubelet=1.23.3-00 kubectl=1.23.3-00
kubeadm version
kubectl version
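To keep a later apt upgrade from pulling in 1.24+ (which would break this Docker-based setup), the packages can be pinned:
# hold the versions installed above
apt-mark hold kubeadm kubelet kubectl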
Pull the required images from the Aliyun registry, then re-tag them with the k8s.gcr.io names:
- Get the image list
clh@ubuntu-master:~$ kubeadm config images list
I1120 00:57:27.313592 14321 version.go:255] remote version is much newer: v1.31.2; falling back to: stable-1.23
k8s.gcr.io/kube-apiserver:v1.23.17
k8s.gcr.io/kube-controller-manager:v1.23.17
k8s.gcr.io/kube-scheduler:v1.23.17
k8s.gcr.io/kube-proxy:v1.23.17
k8s.gcr.io/pause:3.6
k8s.gcr.io/etcd:3.5.1-0
k8s.gcr.io/coredns/coredns:v1.8.6
clh@ubuntu-master:~$
- Update the image versions in the script below to match the list above, then run it
images=(
kube-apiserver:v1.23.17
kube-controller-manager:v1.23.17
kube-scheduler:v1.23.17
kube-proxy:v1.23.17
pause:3.6
etcd:3.5.1-0
coredns:v1.8.6
)
for imageName in ${images[@]} ; do
docker pull registry.aliyuncs.com/google_containers/$imageName
docker tag registry.aliyuncs.com/google_containers/$imageName k8s.gcr.io/$imageName
docker rmi registry.aliyuncs.com/google_containers/$imageName
done
- coredns needs separate handling, because the re-tagged name is missing the extra coredns/ path segment
root@ubuntu-master:~# docker tag k8s.gcr.io/coredns:v1.8.6 k8s.gcr.io/coredns/coredns:v1.8.6
root@ubuntu-master:~# docker rmi k8s.gcr.io/coredns:v1.8.6
root@ubuntu-master:~#
- Check the images
root@ubuntu-master:~# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
ubuntu latest 59ab366372d5 5 weeks ago 78.1MB
k8s.gcr.io/kube-apiserver v1.23.17 62bc5d8258d6 21 months ago 130MB
k8s.gcr.io/kube-controller-manager v1.23.17 1dab4fc7b6e0 21 months ago 120MB
k8s.gcr.io/kube-scheduler v1.23.17 bc6794cb54ac 21 months ago 51.9MB
k8s.gcr.io/kube-proxy v1.23.17 f21c8d21558c 21 months ago 111MB
k8s.gcr.io/etcd 3.5.1-0 25f8c7f3da61 3 years ago 293MB
k8s.gcr.io/coredns/coredns v1.8.6 a4ca41631cc7 3 years ago 46.8MB
k8s.gcr.io/pause
root@ubuntu-master:~#
- Run kubeadm init, specifying the Kubernetes version
root@ubuntu-master:~# kubeadm init --apiserver-advertise-address=192.168.237.128 --pod-network-cidr=10.244.0.0/16 --kubernetes-version=v1.23.3
- The log at the end:
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.237.128:6443 --token 43dc70.hbjdp8k1whnghxj0 \
--discovery-token-ca-cert-hash sha256:0a387cf419fe93f5819eab6d0017f289bd823b2ba3a5c577905094695adcb46b
root@ubuntu-master:~#
- Finally, run:
root@ubuntu-master:~# export KUBECONFIG=/etc/kubernetes/admin.conf
- The node now shows up:
clh@ubuntu-master:~$ kubectl get nodes
NAME STATUS ROLES AGE VERSION
ubuntu-master NotReady control-plane,master 10h v1.23.3
clh@ubuntu-master:~$
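NotReady is expected at this point: no CNI plugin has been installed yet, so the coredns pods stay Pending. This can be confirmed with (standard kube-system namespace assumed):
kubectl get pods -n kube-system -o wide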
- Configure the pod network (Flannel)
wget https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
# Note: --pod-network-cidr=10.244.0.0/16 must have been passed to kubeadm init; otherwise edit the Network field in kube-flannel.yml to match
root@ubuntu-master:~# kubectl apply -f kube-flannel.yml
namespace/kube-flannel created
serviceaccount/flannel created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds created
root@ubuntu-master:~#
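Before rechecking the node, it can help to watch the Flannel DaemonSet pod come up (the kube-flannel namespace is created by the manifest above):
kubectl get pods -n kube-flannel -o wide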
- Check the node status again after a while:
root@ubuntu-master:~# kubectl get node
NAME STATUS ROLES AGE VERSION
ubuntu-master Ready control-plane,master 10h v1.23.3
root@ubuntu-master:~#
3. Worker node installation
- All steps before kubeadm init are the same as on the master node
- On the master node, run:
root@ubuntu-master:~# kubeadm token create --print-join-command
kubeadm join 192.168.237.128:6443 --token xkyr7l.a2x3ftpy6uld22lt --discovery-token-ca-cert-hash sha256:0a387cf419fe93f5819eab6d0017f289bd823b2ba3a5c577905094695adcb46b
root@ubuntu-master:~#
- On the worker node, run the command printed above:
root@ubuntu-node1:~# kubeadm join 192.168.237.128:6443 --token xkyr7l.a2x3ftpy6uld22lt --discovery-token-ca-cert-hash sha256:0a387cf419fe93f5819eab6d0017f289bd823b2ba3a5c577905094695adcb46b
[preflight] Running pre-flight checks
[WARNING SystemVerification]: this Docker version is not on the list of validated versions: 24.0.7. Latest validated version: 20.10
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
W1120 01:36:15.591069 4469 utils.go:69] The recommended value for "resolvConf" in "KubeletConfiguration" is: /run/systemd/resolve/resolv.conf; the provided value is: /run/systemd/resolve/resolv.conf
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
root@ubuntu-node1:~#
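Back on the master, the new node can be verified and, optionally, given a worker role label (purely cosmetic, it only fills the ROLES column):
kubectl get nodes -o wide
kubectl label node ubuntu-node1 node-role.kubernetes.io/worker=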
- If pods stay stuck in ContainerCreating: check /run/flannel/subnet.env on every node and, if it is missing, copy it over from the master (a copy sketch follows the output below)
root@ubuntu-master:~# kubectl describe pod ngx
5c5340814addc35e971d9874069d48f3f4c376d93ea7de728542b65868575" network for pod "ngx": networkPlugin cni failed to set up pod "ngx_default" network: loadFlannelSubnetEnv failed: open /run/flannel/subnet.env: no such file or directory
root@ubuntu-master:~# kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx-deployment-f7658b74c-dc7l5 1/1 Running 0 11m
nginx-deployment-f7658b74c-k7wlv 1/1 Running 0 11m
nginx-deployment-f7658b74c-tsmf9 1/1 Running 0 11m
ngx 1/1 Running 0 55m
root@ubuntu-master:~#
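A sketch of the copy mentioned above, assuming root ssh access between the nodes (hostname per /etc/hosts):
# run on the master; /run/flannel may not exist yet on the worker
ssh root@ubuntu-node1 mkdir -p /run/flannel
scp /run/flannel/subnet.env root@ubuntu-node1:/run/flannel/subnet.env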
- Running kubectl on a worker node fails:
root@ubuntu-node1:~# kubectl get pods
The connection to the server localhost:8080 was refused - did you specify the right host or port?
root@ubuntu-node1:~#
- Fix: copy /etc/kubernetes/admin.conf from the master node to the worker node, then run:
export KUBECONFIG=/etc/kubernetes/admin.conf
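A sketch of the copy step, assuming root ssh access from the master to the worker:
# run on the master
scp /etc/kubernetes/admin.conf root@ubuntu-node1:/etc/kubernetes/admin.conf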
- The export line can also be added to /root/.profile so it persists across logins:
root@ubuntu-node1:~# cat .profile
# ~/.profile: executed by Bourne-compatible login shells.
if [ "$BASH" ]; then
if [ -f ~/.bashrc ]; then
. ~/.bashrc
fi
fi
mesg n 2> /dev/null || true
export KUBECONFIG=/etc/kubernetes/admin.conf
root@ubuntu-node1:~#
- kubectl commands now work on the worker node:
root@ubuntu-node1:~# kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx-deployment-f7658b74c-dc7l5 1/1 Running 0 7h48m
nginx-deployment-f7658b74c-k7wlv 1/1 Running 0 7h48m
nginx-deployment-f7658b74c-tsmf9 1/1 Running 0 7h48m
ngx 1/1 Running 0 8h
root@ubuntu-node1:~#
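For reference, the test workloads visible in the listings above can be created with commands like these (a sketch; the original manifests are not part of these notes, and the image defaults to nginx:latest):
kubectl create deployment nginx-deployment --image=nginx --replicas=3
kubectl run ngx --image=nginx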