Deploying Kubernetes
Container management approaches in k8s
Ways to create a Kubernetes cluster
containerd
The default runtime K8S uses when creating a cluster.
docker
Docker is the most widely used runtime. Although K8S removed kubelet's built-in support for Docker after version 1.24, a cluster can still be created with Docker by going through cri-dockerd.
cri-o
CRI-O is the most direct way for Kubernetes to create containers; when creating the cluster, the cri-o plugin is required.
[!NOTE]
Both the docker and cri-o approaches require adjusting the kubelet startup parameters.
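For the cri-dockerd approach, a minimal sketch of what that setting looks like (assuming cri-dockerd is installed and listening on its usual default socket; verify the path on your system):
# Point kubeadm (and thus kubelet) at the cri-dockerd socket when creating the cluster
]# kubeadm init --cri-socket unix:///var/run/cri-dockerd.sock --pod-network-cidr=10.244.0.0/16
# Nodes joining the cluster pass the same --cri-socket flag to kubeadm join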
K8s harbor host
# The firewall and SELinux must be disabled on every host, as below
[root@docker-harbor ~]# systemctl disable --now firewalld
Removed "/etc/systemd/system/multi-user.target.wants/firewalld.service".
Removed "/etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service".
[root@docker-harbor ~]# getenforce
Disabled
[root@docker-harbor ~]# cd /etc/yum.repos.d
[root@docker-harbor yum.repos.d]# vim docker.repo
[root@docker-harbor yum.repos.d]# cat docker.repo
[docker]
name = docker-ce
baseurl = https://mirrors.aliyun.com/docker-ce/linux/rhel/9/x86_64/stable
gpgcheck = 0
[root@docker-harbor yum.repos.d]# yum makecache
[root@docker-harbor yum.repos.d]# rpm -qa | grep podman
podman-4.6.1-5.el9.x86_64
cockpit-podman-76-1.el9_3.noarch
[root@docker-harbor yum.repos.d]# dnf remove podman cockpit-podman -y
# Upload the required files
[root@docker-harbor ~]# cd /mnt/
[root@docker-harbor mnt]# ls
docker.tar.gz packages.zip
[root@docker-harbor mnt]# tar zxf docker.tar.gz
[root@docker-harbor mnt]# unzip packages.zip
[root@docker-harbor mnt]# ls
docker docker.tar.gz packages packages.zip
[root@docker-harbor mnt]# cd docker/
[root@docker-harbor docker]# yum install *.rpm -y
# On line 15 of the unit file (the ExecStart line), append the option --iptables=true
[root@docker-harbor docker]# vim /usr/lib/systemd/system/docker.service
15 ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --iptables=true
[root@docker-harbor docker]# systemctl daemon-reload
[root@docker-harbor docker]# systemctl restart docker
[root@docker-harbor docker]# echo net.ipv4.ip_forward=1 >> /etc/sysctl.conf
[root@docker-harbor docker]# sysctl -p
net.ipv4.ip_forward = 1
[root@docker-harbor docker]# systemctl enable --now docker
[root@docker-harbor docker]# docker info
# The following is not needed on RHEL 9; on other distributions/versions it is recommended
# Enable the kernel network options
]# echo br_netfilter > /etc/modules-load.d/docker_mod.conf
]# modprobe br_netfilter
]# vim /etc/sysctl.d/docker.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
]# sysctl --system
]# systemctl restart docker
# Create the certificate and key
[root@docker-harbor ~]# mkdir -p /data/certs
[root@docker-harbor ~]# openssl req -newkey rsa:4096 \
-nodes -sha256 -keyout /data/certs/dhj.org.key \
-addext "subjectAltName = DNS:reg.dhj.org" \
-x509 -days 365 -out /data/certs/dhj.org.crt
Common Name (eg, your name or your server's hostname) []:reg.dhj.org
# Create the certificate directory and deploy the trusted certificate (so the Docker client trusts the private registry's HTTPS certificate)
[root@docker-harbor ~]# mkdir /etc/docker/certs.d/reg.dhj.org/ -p
[root@docker-harbor ~]# cp /data/certs/dhj.org.crt /etc/docker/certs.d/reg.dhj.org/ca.crt
[root@docker-harbor ~]# systemctl restart docker
[root@docker-harbor ~]# cd /mnt/packages/
[root@docker-harbor packages]# cp -p harbor-offline-installer-v2.5.4.tgz /root
[root@docker-harbor packages]# cd
[root@docker-harbor ~]# tar zxf harbor-offline-installer-v2.5.4.tgz
[root@docker-harbor ~]# cd harbor
[root@docker-harbor harbor]# cp harbor.yml.tmpl harbor.yml
# The following items need to be changed (if they already match, leave them as-is):
[root@docker-harbor harbor]# vim harbor.yml
5 hostname: reg.dhj.org
17 certificate: /data/certs/dhj.org.crt # adjust to wherever you stored it
18 private_key: /data/certs/dhj.org.key # adjust to wherever you stored it
34 harbor_admin_password: admin # initial admin password
47 data_volume: /data # the data directory (here the same /data directory that holds the certificate and key above)
[root@docker-harbor harbor]# ./install.sh --with-chartmuseum
[root@docker-harbor harbor]# docker compose stop
[root@docker-harbor harbor]# docker compose up -d
# Test in a browser: 172.25.254.200
[root@docker-harbor ~]# cd /etc/docker/
[root@docker-harbor docker]# vim daemon.json
[root@docker-harbor docker]# cat daemon.json
{
"registry-mirrors": ["https://reg.dhj.org"]
}
[root@docker-harbor docker]# systemctl restart docker
[root@k8s-harbor ~]# cd harbor/
[root@k8s-harbor harbor]# docker compose up -d
[root@docker-harbor harbor]# docker login reg.dhj.org
Username: admin
Password:admin
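A quick sanity check that images can be pushed to the new registry (the image name here is only an example; Harbor's default library project exists out of the box):
[root@docker-harbor ~]# docker pull busybox
[root@docker-harbor ~]# docker tag busybox:latest reg.dhj.org/library/busybox:latest
[root@docker-harbor ~]# docker push reg.dhj.org/library/busybox:latest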
Cluster environment initialization
Perform the following steps on all k8s cluster nodes
Disable the firewall and SELinux
Disable swap and set up local name resolution on all nodes
]# vim /etc/fstab
UUID=5b62292e-603b-467a-a1ad-160d612e6f81 / xfs defaults 0 0
UUID=31c1cd5c-22a7-42e8-91d1-f36d1bc1edaa /boot xfs defaults 0 0
# UUID=51600096-c0fa-4247-8d7f-6a40b6b05300 none swap defaults 0 0 # just comment out this last (swap) line
]# systemctl daemon-reload
]# systemctl mask swap.target
]# swapoff -a
]# swapon -s # no output means swap is off (to be safe, reboot and run swapon -s again to confirm)
[root@k8s-node1 ~]# vim /etc/hosts
172.25.254.10 k8s-node1
172.25.254.20 k8s-node2
172.25.254.100 k8s-master
172.25.254.200 reg.dhj.org
# Quickly distribute the hosts file to every host
[root@k8s-node1 ~]# scp /etc/hosts root@172.25.254.20:/etc/hosts
[root@k8s-node1 ~]# scp /etc/hosts root@172.25.254.100:/etc/hosts
[root@k8s-node1 ~]# scp /etc/hosts root@172.25.254.200:/etc/hosts
Install docker on all nodes
[root@docker-harbor yum.repos.d]# rpm -qa | grep podman
podman-4.6.1-5.el9.x86_64
cockpit-podman-76-1.el9_3.noarch
[root@docker-harbor yum.repos.d]# dnf remove podman cockpit-podman -y
# The harbor host already installed docker earlier, so skip it there; the remaining hosts must install it
]# vim /etc/yum.repos.d/docker.repo
[docker]
name=docker
baseurl=https://mirrors.aliyun.com/docker-ce/linux/rhel/9/x86_64/stable/
gpgcheck=0
]# yum makecache
]# dnf install docker-ce -y # if you would rather not install from the repo, you can scp the packages over from the harbor host instead
# On line 15 of the unit file (the ExecStart line), append the option --iptables=true
]# vim /usr/lib/systemd/system/docker.service
15 ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --iptables=true
]# systemctl daemon-reload
]# systemctl restart docker
# If you prefer not to install from the repo, run the following instead
[root@reg ~]# cd /mnt/docker/
[root@reg docker]# scp * root@172.25.254.10:/mnt
[root@reg docker]# scp * root@172.25.254.20:/mnt
[root@reg docker]# scp * root@172.25.254.100:/mnt
]# dnf install -y /mnt/*.rpm
[root@k8s-master mnt]# vim /usr/lib/systemd/system/docker.service
15 ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --iptables=true
[root@k8s-master mnt]# scp /usr/lib/systemd/system/docker.service root@172.25.254.10:/usr/lib/systemd/system/docker.service
[root@k8s-master mnt]# scp /usr/lib/systemd/system/docker.service root@172.25.254.20:/usr/lib/systemd/system/docker.service
]# systemctl daemon-reload
]# systemctl restart docker
Copy the harbor registry certificate to all nodes and start docker
]# mkdir /etc/docker/certs.d/reg.dhj.org/ -p
[root@reg ~]# scp /etc/docker/certs.d/reg.dhj.org/ca.crt root@172.25.254.10:/etc/docker/certs.d/reg.dhj.org/ca.crt
[root@reg ~]# scp /etc/docker/certs.d/reg.dhj.org/ca.crt root@172.25.254.20:/etc/docker/certs.d/reg.dhj.org/ca.crt
[root@reg ~]# scp /etc/docker/certs.d/reg.dhj.org/ca.crt root@172.25.254.100:/etc/docker/certs.d/reg.dhj.org/ca.crt
]# systemctl restart docker
]# systemctl enable --now docker
# Now make our registry docker's default (mirror) registry
# Configure the registry mirror on each host (using the master host as the example; scp the file to the others afterwards)
[root@k8s-master ~]# cd /etc/docker/
[root@k8s-master docker]# vim daemon.json
{
"registry-mirrors": ["https://reg.dhj.org"]
}
[root@k8s-master docker]# cd
[root@k8s-master ~]# scp /etc/docker/daemon.json root@172.25.254.10:/etc/docker/daemon.json
[root@k8s-master ~]# scp /etc/docker/daemon.json root@172.25.254.20:/etc/docker/daemon.json
]# systemctl restart docker
# Log in to the harbor registry
]# docker login reg.dhj.org
Username: admin
Password: admin
# After logging in, run a quick check
]# docker info
# If this shows: Error response from daemon: Get "https://reg.dhj.org/v2/": dial tcp 172.25.254.200:443: connect: connection refused
# run the following on the harbor host and then test again
[root@reg ~]# cd harbor/
[root@reg harbor]# docker compose stop
[root@reg harbor]# docker compose up -d
Install the K8S deployment tools
# Set up the software repository: add the K8S repo
[root@k8s-master ~]# vim /etc/yum.repos.d/k8s.repo
[k8s]
name=k8s
baseurl=https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.30/rpm
gpgcheck=0
[root@k8s-master ~]# scp /etc/yum.repos.d/k8s.repo root@172.25.254.10:/etc/yum.repos.d/k8s.repo
[root@k8s-master ~]# scp /etc/yum.repos.d/k8s.repo root@172.25.254.20:/etc/yum.repos.d/k8s.repo
]# yum makecache
# Install the packages
]# dnf install kubelet-1.30.0 kubeadm-1.30.0 kubectl-1.30.0 -y
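After the install, kubelet is usually enabled on every node so that kubeadm can start it when the cluster is created (a standard follow-up step, not shown in the original transcript):
]# systemctl enable --now kubelet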
Pod management
High availability and reliability:
Automatic failure recovery: if a Pod fails or is deleted, the controller automatically creates new Pods to maintain the desired replica count, keeping the application available and reducing outages caused by a single Pod failure.
Health checks and self-healing: controllers can be configured with health checks on Pods (such as liveness and readiness probes). If a Pod is unhealthy, the controller takes appropriate action, such as restarting it or deleting and recreating it, to keep the application running normally.
Scalability:
Easy scaling: the number of Pods can be increased or decreased with a simple command or configuration change to match the workload, for example scaling out quickly during traffic peaks and scaling in during quiet periods to save resources.
Horizontal Pod Autoscaling (HPA): the Pod count can be adjusted automatically based on metrics such as CPU utilization, memory usage, or application-specific metrics, giving dynamic resource allocation and cost optimization (a minimal HPA sketch follows this list).
Version management and updates:
Rolling updates: controllers such as Deployment can perform rolling updates that gradually replace old-version Pods with new ones, keeping the application available throughout; the update rate and strategy can be tuned to minimize the impact on users.
Rollback: if an update goes wrong, it is easy to roll back to the previous stable version, preserving application stability and reliability.
Declarative configuration:
Concise configuration: deployment requirements are defined in declarative YAML or JSON files, which are easy to understand, maintain, version-control, and collaborate on.
Desired-state management: you only declare the desired state (replica count, container image, and so on); the controller keeps the actual state in line with it, so there is no need to manage the creation and deletion of each Pod by hand, which makes management far more efficient.
Service discovery and load balancing:
Automatic registration and discovery: a Kubernetes Service automatically discovers the Pods managed by a controller and routes traffic to them, so service discovery and load balancing work without manually configuring a load balancer.
Traffic distribution: requests can be spread across Pods with different strategies (round robin, random, and so on), improving performance and availability.
Consistency across environments:
Consistent deployment: the same controllers and configuration can be used to deploy the application in different environments (development, test, production), so it behaves the same everywhere, which reduces deployment drift and errors and improves development and operations efficiency.
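A minimal sketch of creating an HPA for the timinglee deployment used in the next section (this assumes the metrics-server add-on is already installed, which this document does not cover):
[root@k8s-master ~]# kubectl autoscale deployment timinglee --min 2 --max 6 --cpu-percent 80
[root@k8s-master ~]# kubectl get hpa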
# Monitoring
watch -n1 <command>
# Create a controller, which runs the pods automatically
[root@k8s-master ~]# kubectl create deployment timinglee --image nginx
[root@k8s-master ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
timinglee-859fbf84d6-mrjvx 1/1 Running 0 37m
# Scale timinglee up
[root@k8s-master ~]# kubectl scale deployment timinglee --replicas 6
[root@k8s-master ~]# kubectl get pods
# Scale timinglee down
[root@k8s-master ~]# kubectl scale deployment timinglee --replicas 2
deployment.apps/timinglee scaled
[root@k8s-master ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
timinglee-859fbf84d6-mrjvx 1/1 Running 0 38m
timinglee-859fbf84d6-tsn97 1/1 Running 0 73s
Controllers
Controllers are another means of managing pods.
Standalone pods: when the pod exits or is shut down unexpectedly, it is not recreated.
Controller-managed pods: for the lifetime of the controller, the desired number of pod replicas is always maintained.
A Pod controller is an intermediate layer for managing pods. With a controller, you only tell it how many pods of what kind you want; it creates pods that satisfy those conditions and keeps every pod in the state the user expects. If a pod fails while running, the controller re-schedules pods according to the specified policy.
When a controller is created, the desired state is written to etcd. The k8s apiserver reads the desired state saved in etcd and compares it with the pods' current state; if they differ, the system automatically drives the cluster back toward the desired state (a quick way to see this in action is shown below).
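A quick way to observe this reconciliation, assuming the timinglee deployment from the section above is still running (the pod name is taken from the earlier output):
# Delete one managed pod; the Deployment immediately creates a replacement to restore the replica count
[root@k8s-master ~]# kubectl delete pod timinglee-859fbf84d6-mrjvx
[root@k8s-master ~]# kubectl get pods   # a new pod with a different suffix appears almost at once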
Deployment controller example
# Generate the yaml file
[root@k8s-master ~]# kubectl create deployment deployment --image myapp:v1 --dry-run=client -o yaml > deployment.yml
[root@k8s-master ~]# vim deployment.yml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: deployment
spec:
  replicas: 4
  selector:
    matchLabels:
      app: myapp
  template:
    metadata:
      labels:
        app: myapp
    spec:
      containers:
      - image: myapp:v1
        name: myapp
# Create the pods
[root@k8s-master ~]# kubectl apply -f deployment.yml
deployment.apps/deployment created
# Check the pod information
[root@k8s-master ~]# kubectl get pods --show-labels
NAME READY STATUS RESTARTS AGE LABELS
deployment-5d886954d4-2ckqw 1/1 Running 0 23s app=myapp,pod-template-hash=5d886954d4
deployment-5d886954d4-m8gpd 1/1 Running 0 23s app=myapp,pod-template-hash=5d886954d4
deployment-5d886954d4-s7pws 1/1 Running 0 23s app=myapp,pod-template-hash=5d886954d4
deployment-5d886954d4-wqnvv 1/1 Running 0 23s app=myapp,pod-template-hash=5d886954d4
[root@k8s-master ~]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
deployment-5d886954d4-2ckqw 1/1 Running 0 2m40s 10.244.2.14 k8s-node2 <none> <none>
deployment-5d886954d4-m8gpd 1/1 Running 0 2m40s 10.244.1.17 k8s-node1 <none> <none>
deployment-5d886954d4-s7pws 1/1 Running 0 2m40s 10.244.1.16 k8s-node1 <none> <none>
deployment-5d886954d4-wqnvv 1/1 Running 0 2m40s 10.244.2.15 k8s-node2 <none> <none>
# The pods are running container version v1
[root@k8s-master ~]# curl 10.244.2.14
Hello MyApp | Version: v1 | <a href="hostname.html">Pod Name</a>
[root@k8s-master ~]# kubectl describe deployments.apps deployment
Name: deployment
Namespace: default
CreationTimestamp: Sun, 01 Sep 2024 23:19:10 +0800
Labels: <none>
Annotations: deployment.kubernetes.io/revision: 1
Selector: app=myapp
Replicas: 4 desired | 4 updated | 4 total | 4 available | 0 unavailable
StrategyType: RollingUpdate
MinReadySeconds: 0
RollingUpdateStrategy: 25% max unavailable, 25% max surge # by default, at most 25% of the pods are replaced per step
# Update the container image version
[root@k8s-master ~]# vim deployment.yml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: deployment
spec:
  minReadySeconds: 5          # minimum ready time: 5 seconds
  replicas: 4
  selector:
    matchLabels:
      app: myapp
  template:
    metadata:
      labels:
        app: myapp
    spec:
      containers:
      - image: myapp:v2       # update to v2
        name: myapp
[root@k8s2 pod]# kubectl apply -f deployment-example.yaml
# Watch the update in progress
[root@k8s-master ~]# watch -n1 kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE
deployment-5d886954d4-8kb28 1/1 Running 0 48s
deployment-5d886954d4-8s4h8 1/1 Running 0 49s
deployment-5d886954d4-rclkp 1/1 Running 0 50s
deployment-5d886954d4-tt2hz 1/1 Running 0 50s
deployment-7f4786db9c-g796x 0/1 Pending 0 0s
# Check the result of the update
[root@k8s-master ~]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
deployment-7f4786db9c-967fk 1/1 Running 0 10s 10.244.1.26 k8s-node1 <none> <none>
deployment-7f4786db9c-cvb9k 1/1 Running 0 10s 10.244.2.24 k8s-node2 <none> <none>
deployment-7f4786db9c-kgss4 1/1 Running 0 9s 10.244.1.27 k8s-node1 <none> <none>
deployment-7f4786db9c-qts8c 1/1 Running 0 9s 10.244.2.25 k8s-node2 <none> <none>
[root@k8s-master ~]# curl 10.244.1.26
Hello MyApp | Version: v2 | <a href="hostname.html">Pod Name</a>
Version rollback
[root@k8s-master ~]# vim deployment.yml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: deployment
spec:
  replicas: 4
  selector:
    matchLabels:
      app: myapp
  template:
    metadata:
      labels:
        app: myapp
    spec:
      containers:
      - image: myapp:v1       # roll back to the previous version
        name: myapp
[root@k8s-master ~]# kubectl apply -f deployment.yml
deployment.apps/deployment configured
# Check the rollback result
[root@k8s-master ~]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
deployment-5d886954d4-dr74h 1/1 Running 0 8s 10.244.2.26 k8s-node2 <none> <none>
deployment-5d886954d4-thpf9 1/1 Running 0 7s 10.244.1.29 k8s-node1 <none> <none>
deployment-5d886954d4-vmwl9 1/1 Running 0 8s 10.244.1.28 k8s-node1 <none> <none>
deployment-5d886954d4-wprpd 1/1 Running 0 6s 10.244.2.27 k8s-node2 <none> <none>
[root@k8s-master ~]# curl 10.244.2.26
Hello MyApp | Version: v1 | <a href="hostname.html">Pod Name</a>
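The same rollback can also be done without editing the yaml, through the Deployment's revision history (a sketch; the revision numbers depend on your own history):
[root@k8s-master ~]# kubectl rollout history deployment/deployment
[root@k8s-master ~]# kubectl rollout undo deployment/deployment                  # back to the previous revision
[root@k8s-master ~]# kubectl rollout undo deployment/deployment --to-revision=1  # or to a specific revision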
Rolling update strategy
[root@k8s-master ~]# vim deployment.yml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: deployment
spec:
  minReadySeconds: 5          # minimum time a new pod must stay ready before it counts as available (paces the rollout)
  replicas: 4
  strategy:                   # update strategy
    rollingUpdate:
      maxSurge: 1             # how many pods may be created above the desired replica count during the update
      maxUnavailable: 0       # how many pods may be unavailable below the desired count (0 = never drop below)
  selector:
    matchLabels:
      app: myapp
  template:
    metadata:
      labels:
        app: myapp
    spec:
      containers:
      - image: myapp:v1
        name: myapp
[root@k8s2 pod]# kubectl apply -f deployment-example.yaml
DaemonSet example
[root@k8s2 pod]# vim daemonset-example.yml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: daemonset-example
spec:
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      tolerations:            # tolerate tainted nodes (e.g. the control-plane taint), so it also runs on the master
      - effect: NoSchedule
        operator: Exists
      containers:
      - name: nginx
        image: nginx:1.23
[root@k8s-master ~]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
daemonset-87h6s 1/1 Running 0 47s 10.244.0.8 k8s-master <none> <none>
daemonset-n4vs4 1/1 Running 0 47s 10.244.2.38 k8s-node2 <none> <none>
daemonset-vhxmq 1/1 Running 0 47s 10.244.1.40 k8s-node1 <none> <none>
# Clean up
[root@k8s2 pod]# kubectl delete -f daemonset-example.yml
Job controller example
# First upload the test image
[root@k8s-master mnt]# docker load -i perl-5.34.tar.gz
[root@k8s-master mnt]# docker tag perl:5.34.0 reg.dhj.org/library/perl:5.34.0
[root@k8s-master mnt]# docker push reg.dhj.org/library/perl:5.34.0
[root@k8s2 pod]# vim job.yml
apiVersion: batch/v1
kind: Job
metadata:
  name: pi
spec:
  completions: 6              # 6 completions in total
  parallelism: 2              # run 2 in parallel at a time
  template:
    spec:
      containers:
      - name: pi
        image: perl:5.34.0
        command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]   # compute pi to 2000 digits
      restartPolicy: Never    # do not restart the container after it exits
  backoffLimit: 4             # retry up to 4 times on failure
[root@k8s2 pod]# kubectl apply -f job.yml
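A sketch of checking the result once the Job has run (kubectl logs job/pi picks one of the completed pods):
[root@k8s2 pod]# kubectl get jobs
[root@k8s2 pod]# kubectl get pods      # six pods in Completed state when the Job is done
[root@k8s2 pod]# kubectl logs job/pi   # prints pi to 2000 digits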
CronJob controller example
[root@k8s2 pod]# vim cronjob.yml
apiVersion: batch/v1
kind: CronJob
metadata:
  name: hello
spec:
  schedule: "* * * * *"
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: hello
            image: busybox
            imagePullPolicy: IfNotPresent
            command:
            - /bin/sh
            - -c
            - date; echo Hello from the Kubernetes cluster
          restartPolicy: OnFailure
[root@k8s2 pod]# kubectl apply -f cronjob.yml
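The schedule "* * * * *" fires every minute; a sketch of watching it run (the job name suffix is generated, so it is shown as a placeholder here):
[root@k8s2 pod]# kubectl get cronjobs
[root@k8s2 pod]# kubectl get jobs --watch        # a new job appears roughly every minute
[root@k8s2 pod]# kubectl logs job/<job-name>     # prints the date and the greeting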
Microservices
Workloads run in the cluster by controllers must be exposed as a microservice (Service) before they can be accessed.
# Generate the controller file and create the controller
[root@k8s-master ~]# kubectl create deployment dhj --image myapp:v1 --replicas 2 --dry-run=client -o yaml > dhj.yml
[root@k8s-master ~]# kubectl apply -f dhj.yml
deployment.apps/dhj created
# Generate the Service yaml and append it to the existing yaml
[root@k8s-master ~]# kubectl expose deployment dhj --port 80 --target-port 80 --dry-run=client -o yaml >> dhj.yml
[root@k8s-master ~]# vim dhj.yml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: dhj
  name: dhj
spec:
  replicas: 2
  selector:
    matchLabels:
      app: dhj
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: dhj
    spec:
      containers:
      - image: myapp:v1
        name: myapp
---                           # different resources are separated by ---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: dhj
  name: dhj
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: dhj
[root@k8s-master ~]# kubectl apply -f dhj.yml
service/dhj created
[root@k8s-master ~]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
dhj ClusterIP 10.97.95.78 <none> 80/TCP 6s
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 2d17h
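A quick in-cluster test against the ClusterIP shown above (the response should be the same myapp v1 page seen earlier, load-balanced across the two pods):
[root@k8s-master ~]# curl 10.97.95.78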
Services use iptables scheduling by default
[root@k8s-master ~]# kubectl get services -o wide
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
dhj ClusterIP 10.97.95.78 <none> 80/TCP 112s app=dhj # the in-cluster ClusterIP is 10.97.95.78
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 2d17h <none>
# The corresponding rules can be seen in the iptables NAT table
[root@k8s-master ~]# iptables -t nat -nL
KUBE-SVC-RV2MKWNOHWTUCXGM tcp -- 0.0.0.0/0 10.97.95.78 /* default/dhj cluster IP */ tcp dpt:80
Configuring IPVS mode
Install ipvsadm on all nodes
[root@k8s-master & node1 & node2 ~]# yum install ipvsadm -y
Edit the kube-proxy configuration on the master node
[root@k8s-master ~]# kubectl -n kube-system edit cm kube-proxy
metricsBindAddress: ""
mode: "ipvs" #设置kube-proxy使用ipvs模式
nftables:
Restart the kube-proxy pods
# Force-restart all kube-proxy pods in the kube-system namespace
[root@k8s-master ~]# kubectl -n kube-system get pods | awk '/kube-proxy/{system("kubectl -n kube-system delete pods "$1)}'
[root@k8s-master ~]# ipvsadm -Ln
Cluster storage
ConfigMap walkthrough
Creating a ConfigMap
[root@k8s-master mnt]# kubectl create cm ceshi --from-literal name1=dhj
[root@k8s-master mnt]# kubectl describe cm ceshi
Using a ConfigMap to populate environment variables
[root@k8s-master mnt]# kubectl run testpod --image busyboxplus --dry-run=client -o yaml > test.yml
[root@k8s-master ~]# vim cmcfg1.yml
[root@k8s-master mnt]# kubectl apply -f cmcfg1.yml
[root@k8s-master ~]# kubectl create cm cm-ceshi --from-file cmcfg1.yml --dry-run=client
[root@k8s-master ~]# kubectl describe cm cm-ceshi
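The transcript never shows the ConfigMap named testyaml that the next pod references; judging from the keys and the logged values below, it was presumably created with something like:
[root@k8s-master ~]# kubectl create cm testyaml --from-literal db_host=172.25.254.100 --from-literal db_port=3306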
# Map values from the cm to specific variables
[root@k8s-master ~]# vim testyaml1.yml
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: testyaml
  name: testyaml
spec:
  containers:
  - image: busyboxplus:latest
    name: testyaml
    command:
    - /bin/sh
    - -c
    - env
    env:
    - name: key1
      valueFrom:
        configMapKeyRef:
          name: testyaml
          key: db_host
    - name: key2
      valueFrom:
        configMapKeyRef:
          name: testyaml
          key: db_port
  restartPolicy: Never
[root@k8s-master ~]# kubectl apply -f testyaml1.yml
[root@k8s-master mnt]# kubectl create cm testpod --from-file testyaml1.yml
[root@k8s-master mnt]# kubectl logs pod/testyaml | grep key
key1=172.25.254.100
key2=3306
# Map the cm's values directly into environment variables
[root@k8s-master ~]# vim testpod2.yml
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: testpod
  name: testpod
spec:
  containers:
  - image: busyboxplus:latest
    name: testpod
    command:
    - /bin/sh
    - -c
    - env
    envFrom:
    - configMapRef:
        name: testpod
  restartPolicy: Never
# Check the logs
[root@k8s-master mnt]# kubectl logs pod/testpod | grep db
db_port=3306
db_host=172.25.254.100
Using a ConfigMap through a volume
[root@k8s-master ~]# vim testpod4.yml
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: testpod
  name: testpod
spec:
  containers:
  - image: busyboxplus:latest
    name: testpod
    command:
    - /bin/sh
    - -c
    - cat /config/db_host
    volumeMounts:             # mount the volume
    - name: config-volume     # volume name
      mountPath: /config
  volumes:                    # declare the volume
  - name: config-volume       # volume name
    configMap:
      name: testpod
  restartPolicy: Never
# Check the logs
[root@k8s-master ~]# kubectl logs testpod
172.25.254.100
Using a ConfigMap to fill a pod's configuration
# Create the configuration file template
[root@k8s-master ~]# vim nginx.conf
server {
    listen 8000;
    server_name _;
    root /usr/share/nginx/html;
    index index.html;
}
# Generate the cm from the template
[root@k8s-master ~]# kubectl create cm nginx-conf --from-file nginx.conf
configmap/nginx-conf created
[root@k8s-master ~]# kubectl describe cm nginx-conf
Name: nginx-conf
Namespace: default
Labels: <none>
Annotations: <none>
Data
====
nginx.conf:
----
server {
listen 8000;
server_name _;
root /usr/share/nginx/html;
index index.html;
}
BinaryData
====
Events: <none>
# Create the nginx controller file
[root@k8s-master ~]# kubectl create deployment nginx --image nginx:latest --replicas 1 --dry-run=client -o yaml > nginx.yml
# Configure the volume in nginx.yml
[root@k8s-master ~]# vim nginx.yml
[root@k8s-master ~]# cat nginx.yml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx
  name: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - image: nginx:latest
        name: nginx
        volumeMounts:
        - name: config-volume
          mountPath: /etc/nginx/conf.d
      volumes:
      - name: config-volume
        configMap:
          name: nginx-conf
# Test
[root@k8s-master ~]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-8487c65cfc-cz5hd 1/1 Running 0 3m7s 10.244.2.38 k8s-node2 <none> <none>
[root@k8s-master ~]# curl 10.244.2.38:8000
Cluster networking and scheduling
Deploy Calico
Remove the flannel plugin
[root@k8s-master ~]# kubectl delete -f kube-flannel.yml
Delete the flannel configuration files on all nodes to avoid conflicts
[root@k8s-master & node1-2 ~]# rm -rf /etc/cni/net.d/xxxxxxx # xxx: tab-complete the actual file names here
Download the deployment file
# If you are working from the offline package, upload the file instead of downloading it
[root@k8s-master calico]# curl https://raw.githubusercontent.com/projectcalico/calico/v3.28.1/manifests/calico-typha.yaml -o calico.yaml
Modify the yml settings
[root@k8s-master calico]# vim calico.yaml
4835 image: calico/cni:v3.28.1
4906 image: calico/node:v3.28.1
4932 image: calico/node:v3.28.1
5160 image: calico/kube-controllers:v3.28.1
5249 - image: calico/typha:v3.28.1
4970 - name: CALICO_IPV4POOL_IPIP
4971   value: "Never"
4999 - name: CALICO_IPV4POOL_CIDR
5000   value: "10.244.0.0/16" # use your own cluster pod CIDR here (the command below shows how to check it)
5001 - name: CALICO_AUTODETECTION_METHOD
5002   value: "interface=eth0"
[root@k8s-master calico]# kubectl apply -f calico.yaml
[root@k8s-master calico]# kubectl -n kube-system get pods
# Check the cluster pod CIDR
[root@k8s-master ~]# cat /etc/kubernetes/manifests/kube-controller-manager.yaml | grep cidr
Test
[root@k8s-master mnt]# kubectl run web --image myapp:v1
pod/web created
[root@k8s-master mnt]# kubectl get pods -o wide
web 1/1 Running 0 10s 10.244.169.128 k8s-node2 <none> <none>
[root@k8s-master mnt]# curl 10.244.169.128
Hello MyApp | Version: v1 | <a href="hostname.html">Pod Name</a>
nodeName
nodeName is the simplest node selection constraint, but it is generally not recommended.
If nodeName is specified in the PodSpec, it takes precedence over all other node selection methods.
Limitations of using nodeName to select nodes:
If the specified node does not exist, the pod will not run.
If the specified node does not have the resources to accommodate the pod, scheduling of the pod fails.
Node names in cloud environments are not always predictable or stable.
Example
# Generate the pod file
[root@k8s-master mnt]# kubectl run testpod --image myapp:v1 --dry-run=client -o yaml > pod1.yml
# Check the cluster node names
[root@k8s-master mnt]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master Ready control-plane 8d v1.30.0
k8s-node1 Ready <none> 8d v1.30.0
k8s-node2 Ready <none> 8d v1.30.0
# Edit the yml file and apply it
[root@k8s-master mnt]# vim pod1.yml
[root@k8s-master mnt]# kubectl apply -f pod1.yml
# Test
[root@k8s-master mnt]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
testpod 1/1 Running 0 12s 10.244.36.64 k8s-node1 <none> <none>
# If the yml names a node that is not part of the cluster, the pod status stays Pending
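The edited pod1.yml is not shown above; given the scheduling result (k8s-node1), it presumably looks like the generated template with a nodeName field added:
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: testpod
  name: testpod
spec:
  nodeName: k8s-node1          # schedule directly onto this node, bypassing the scheduler
  containers:
  - image: myapp:v1
    name: testpod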
nodeSelector (selecting nodes through labels)
- nodeSelector is the simplest recommended form of node selection constraint (a pod spec using it is sketched below)
- Add or remove a label on the chosen node:
kubectl label nodes <node-name> <label-key>=xxx   # add a label
kubectl label nodes <node-name> <label-key>-      # remove a label (the trailing - deletes it)
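A minimal sketch of using such a label in a pod spec (the label key/value disktype=ssd is only an example):
[root@k8s-master mnt]# kubectl label nodes k8s-node1 disktype=ssd
apiVersion: v1
kind: Pod
metadata:
  name: testpod
spec:
  nodeSelector:
    disktype: ssd              # only nodes carrying this label are eligible
  containers:
  - image: myapp:v1
    name: testpod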
Cluster authentication
ServiceAccount example
[root@k8s-master ~]# kubectl create sa timinglee
serviceaccount/timinglee created
[root@k8s-master ~]# kubectl describe sa timinglee
Name: timinglee
Namespace: default
Labels: <none>
Annotations: <none>
Image pull secrets: <none>
Mountable secrets: <none>
Tokens: <none>
Events: <none>
Create the secret
[root@k8s-master ~]# kubectl create secret docker-registry docker-login --docker-username admin --docker-password lee --docker-server reg.timinglee.org --docker-email lee@timinglee.org
secret/docker-login created
[root@k8s-master ~]# kubectl describe secrets docker-login
Name: docker-login
Namespace: default
Labels: <none>
Annotations: <none>
Type: kubernetes.io/dockerconfigjson
Data
====
.dockerconfigjson: 119 bytes
Inject the secret into the sa
[root@k8s-master ~]# kubectl edit sa timinglee
apiVersion: v1
imagePullSecrets:
- name: docker-login
kind: ServiceAccount
metadata:
  creationTimestamp: "2024-09-08T15:44:04Z"
  name: timinglee
  namespace: default
  resourceVersion: "262259"
  uid: 7645a831-9ad1-4ae8-a8a1-aca7b267ea2d
[root@k8s-master ~]# kubectl describe sa timinglee
Name: timinglee
Namespace: default
Labels: <none>
Annotations: <none>
Image pull secrets: docker-login
Mountable secrets: <none>
Tokens: <none>
Events: <none>
Create a private repository and access it from a pod
[root@k8s-master auth]# vim example1.yml
[root@k8s-master auth]# kubectl apply -f example1.yml
pod/testpod created
[root@k8s-master auth]# kubectl describe pod testpod
Warning Failed 5s kubelet Failed to pull image "reg.timinglee.org/lee/nginx:latest": Error response from daemon: unauthorized: unauthorized to access repository: lee/nginx, action: pull: unauthorized to access repository: lee/nginx, action: pull
Warning Failed 5s kubelet Error: ErrImagePull
Normal BackOff 3s (x2 over 4s) kubelet Back-off pulling image "reg.timinglee.org/lee/nginx:latest"
Warning Failed 3s (x2 over 4s) kubelet Error: ImagePullBackOff
Bind the sa to the pod
[root@k8s-master auth]# vim example1.yml
apiVersion: v1
kind: Pod
metadata:
  name: testpod
spec:
  serviceAccountName: timinglee
  containers:
  - image: reg.timinglee.org/lee/nginx:latest
    name: testpod
[root@k8s-master auth]# kubectl apply -f example1.yml
pod/testpod created
[root@k8s-master auth]# kubectl get pods
NAME READY STATUS RESTARTS AGE
testpod 1/1 Running 0 2s