人生苦短,我用k8s--------------k8s集群二进制部署

    科技2024-08-16  30

    一、k8s二进制方式多节点部署

    要先部署单节点集群,可查阅我前一篇博客

    1、环境介绍

    下面拓扑图还有一个harbor仓库没有说明,到时候部署在单独的一台服务器上即可 主机分配

    2、master02节点操作

    开局优化

    关闭防火墙,关闭核心防护,关闭网络管理功能(生产环境中一定要关闭它)

    [root@localhost ~]# hostnamectl set-hostname master02 '//修改主机名' [root@localhost ~]# su [root@master02 ~]# [root@master02 ~]# systemctl stop firewalld && systemctl disable firewalld '//关闭防火墙' Removed symlink /etc/systemd/system/multi-user.target.wants/firewalld.service. Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service. [root@master02 ~]# setenforce 0 && sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config '//关闭核心防护' [root@master02 ~]# systemctl stop NetworkManager && systemctl disable NetworkManager '//关闭网络管理功能' Removed symlink /etc/systemd/system/multi-user.target.wants/NetworkManager.service. Removed symlink /etc/systemd/system/dbus-org.freedesktop.nm-dispatcher.service. Removed symlink /etc/systemd/system/network-online.target.wants/NetworkManager-wait-online.service.

    master节点操作,将master节点的kubernetes配置文件和启动脚本复制到master02节点

    [root@master ~]# scp -r /opt/kubernetes/ root@192.168.233.130:/opt/ [root@master ~]# scp /usr/lib/systemd/system/{kube-apiserver,kube-controller-manager,kube-scheduler}.service root@192.168.233.130:/usr/lib/systemd/system/

    master02上修改apiserver配置文件中的IP地址

    [root@master02 ~]# cd /opt/kubernetes/cfg/ [root@master02 cfg]# ls kube-apiserver kube-controller-manager kube-scheduler token.csv [root@master02 cfg]# vim kube-apiserver KUBE_APISERVER_OPTS="--logtostderr=true \ --v=4 \ --etcd-servers=https://192.168.233.131:2379,https://192.168.233.132:2379,https://192.168.233.133:2379 \ --bind-address=192.168.233.130 \ '//修改此处的绑定IP地址' --secure-port=6443 \ --advertise-address=192.168.233.130 \ '//修改此处的IP地址' ...省略

    将master节点的etcd证书复制到master02节点(master02上一定要有etcd证书,用来与etcd通信)

    [root@master ~]# scp -r /opt/etcd/ root@192.168.233.130:/opt 1 master02节点查看etcd证书,并启动三个服务 [root@master02 ~]# tree /opt/etcd /opt/etcd ├── bin │ ├── etcd │ └── etcdctl ├── cfg │ └── etcd └── ssl ├── ca-key.pem ├── ca.pem ├── server-key.pem └── server.pem 3 directories, 7 files [root@master02 ~]# systemctl start kube-apiserver.service [root@master02 ~]# systemctl status kube-apiserver.service [root@master02 ~]# systemctl enable kube-apiserver.service [root@master02 ~]# systemctl start kube-controller-manager.service [root@master02 ~]# systemctl status kube-controller-manager.service [root@master02 ~]# systemctl enable kube-controller-manager.service [root@master02 ~]# systemctl enable kube-scheduler.service [root@master02 ~]# systemctl start kube-scheduler.service [root@master02 ~]# systemctl status kube-scheduler.service

    添加环境变量并查看状态

    [root@master02 ~]# echo export PATH=$PATH:/opt/kubernetes/bin >> /etc/profile [root@master02 ~]# source /etc/profile [root@master02 ~]# kubectl get node NAME STATUS ROLES AGE VERSION 192.168.233.132 Ready <none> 23h v1.12.3 192.168.233.133 Ready <none> 23h v1.12.3

    3、nginx负载均衡集群部署

    两个nginx主机开局优化(仅展示nginx01的操作):关闭防火墙和核心防护,编辑nginx yum源

    [root@localhost ~]# hostnamectl set-hostname nginx01 '//修改主机名' [root@localhost ~]# su [root@nginx01 ~]# [root@nginx01 ~]# systemctl stop firewalld && systemctl disable firewalld '//关闭防火墙与核心防护' [root@nginx01 ~]# setenforce 0 && sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config [root@nginx01 ~]# vi /etc/yum.repos.d/nginx.repo '//编辑nginx的yum源' [nginx] name=nginx.repo baseurl=http://nginx.org/packages/centos/7/$basearch/ enabled=1 gpgcheck=0 [root@nginx01 ~]# yum clean all [root@nginx01 ~]# yum makecache

    两台nginx主机安装nginx并开启四层转发(仅展示nginx01的操作)

    [root@nginx01 ~]# yum -y install nginx '//安装nginx' [root@nginx01 ~]# vi /etc/nginx/nginx.conf ...省略内容 13 stream { 14 15 log_format main '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent'; 16 access_log /var/log/nginx/k8s-access.log main; ##指定日志目录 17 18 upstream k8s-apiserver { 19 #此处为master的ip地址和端口 20 server 192.168.233.131:6443; '//6443是apiserver的端口号' 21 #此处为master02的ip地址和端口 22 server 192.168.233.130:6443; 23 } 24 server { 25 listen 6443; 26 proxy_pass k8s-apiserver; 27 } 28 } 。。。省略内容

    启动nginx服务

    [root@nginx01 ~]# nginx -t '//检查nginx语法' nginx: the configuration file /etc/nginx/nginx.conf syntax is ok nginx: configuration file /etc/nginx/nginx.conf test is successful [root@nginx01 ~]# systemctl start nginx '//开启服务' [root@nginx01 ~]# systemctl status nginx [root@nginx01 ~]# netstat -ntap |grep nginx '//会检测出来6443端口' tcp 0 0 0.0.0.0:6443 0.0.0.0:* LISTEN 1849/nginx: master tcp 0 0 0.0.0.0:80 0.0.0.0:* LISTEN 1849/nginx: master

    两台nginx主机部署keepalived服务(仅展示nginx01的操作)

    [root@nginx01 ~]# yum -y install keepalived [root@nginx01 ~]# vim /etc/keepalived/keepalived.conf ! Configuration File for keepalived global_defs { # 接收邮件地址 notification_email { acassen@firewall.loc failover@firewall.loc sysadmin@firewall.loc } # 邮件发送地址 notification_email_from Alexandre.Cassen@firewall.loc smtp_server 127.0.0.1 smtp_connect_timeout 30 router_id NGINX_MASTER } vrrp_script check_nginx { script "/usr/local/nginx/sbin/check_nginx.sh" '//keepalived服务检查脚本的位置' } vrrp_instance VI_1 { state MASTER '//nginx02设置为BACKUP' interface ens33 virtual_router_id 51 '//nginx02可设置为52' priority 100 '//优先级,nginx02设置 90' advert_int 1 '//指定VRRP 心跳包通告间隔时间,默认1秒 ' authentication { auth_type PASS auth_pass 1111 } virtual_ipaddress { 192.168.233.100/24 '//VIP地址' } track_script { check_nginx } }

    创建监控脚本,启动keepalived服务,查看VIP地址

    [root@nginx01 ~]# mkdir -p /usr/local/nginx/sbin/ '//创建监控脚本目录' [root@nginx01 ~]# vim /usr/local/nginx/sbin/check_nginx.sh '//编写监控脚本配置文件' count=$(ps -ef |grep nginx |egrep -cv "grep|$$") if [ "$count" -eq 0 ];then systemctl stop keepalived fi [root@nginx01 ~]# chmod +x /usr/local/nginx/sbin/check_nginx.sh '//给权限' [root@nginx01 ~]# systemctl start keepalived '//开启服务' [root@nginx01 ~]# systemctl status keepalived [root@nginx01 ~]# ip a '//两个nginx服务器查看IP地址' VIP在nginx01上 [root@nginx02 ~]# ip a

    验证漂移地址

    [root@nginx01 ~]# pkill nginx '//关闭nginx服务' [root@nginx01 ~]# systemctl status keepalived '//发现keepalived服务关闭了' [root@nginx02 ~]# ip a '//现在发现VIP地址跑到nginx02上了'

    恢复漂移地址的操作

    [root@nginx01 ~]# systemctl start nginx [root@nginx01 ~]# systemctl start keepalived '//先开启nginx,再启动keepalived服务' [root@nginx01 ~]# ip a '//再次查看,发现VIP回到了nginx01节点上'

    修改两个node节点配置文件(bootstrap.kubeconfig、kubelet.kubeconfig、kube-proxy.kubeconfig),统一VIP地址,仅展示node01节点的操作

    [root@node01 ~]# vi /opt/k8s/cfg/bootstrap.kubeconfig server: https://192.168.233.100:6443 '//此地址修改为VIP地址' [root@node01 ~]# vi /opt/k8s/cfg/kubelet.kubeconfig server: https://192.168.233.100:6443 '//此地址修改为VIP地址' [root@node01 ~]# vi /opt/k8s/cfg/kube-proxy.kubeconfig server: https://192.168.233.100:6443 '//此地址修改为VIP地址'

    重启两个node节点的服务

    [root@node01 ~]# systemctl restart kubelet [root@node01 ~]# systemctl restart kube-proxy [root@node01 ~]# cd /opt/k8s/cfg/ [root@node01 cfg]# grep 100 * '//VIP修改成功' bootstrap.kubeconfig: server: https://192.168.233.100:6443 kubelet.kubeconfig: server: https://192.168.233.100:6443 kube-proxy.kubeconfig: server: https://192.168.233.100:6443

    在nginx01上查看k8s日志

    [root@nginx01 ~]# tail /var/log/nginx/k8s-access.log '//下面的日志是重启服务的时候产生的' 192.168.233.132 192.168.233.131:6443 - [01/May/2020:01:25:59 +0800] 200 1121 192.168.233.132 192.168.233.131:6443 - [01/May/2020:01:25:59 +0800] 200 1121

    master节点测试创建pod

    [root@master ~]# kubectl run nginx --image=nginx '//创建一个nginx测试pod' kubectl run --generator=deployment/apps.v1beta1 is DEPRECATED and will be removed in a future version. Use kubectl create instead. deployment.apps/nginx created [root@master ~]# kubectl get pods '//查看状态,是正在创建' NAME READY STATUS RESTARTS AGE nginx-dbddb74b8-5s6h7 0/1 ContainerCreating 0 13s [root@master ~]# kubectl get pods '//稍等一下再次查看,发现pod已经创建完成,在master02节点也可以查看' NAME READY STATUS RESTARTS AGE nginx-dbddb74b8-5s6h7 1/1 Running 0 23s

    查看pod日志

    [root@master ~]# kubectl logs nginx-dbddb74b8-5s6h7 '//查看pod日志发现报错原因是权限问题' Error from server (Forbidden): Forbidden (user=system:anonymous, verb=get, resource=nodes, subresource=proxy) ( pods/log nginx-dbddb74b8-5s6h7) [root@master ~]# kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=system:anonymous '//指定集群中的匿名用户有管理员权限' [root@master ~]# kubectl logs nginx-dbddb74b8-5s6h7 '//此时可以访问,但是没有日志产生'

    访问node节点的pod资源产生日志,并在两个master节点查看

    [root@master ~]# kubectl get pods -o wide '//查看podIP网络信息' NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE nginx-dbddb74b8-5s6h7 1/1 Running 0 6m29s 172.17.26.2 192.168.233.132 <none> [root@node01 ~]# curl 172.17.26.2 '//在对应的节点访问pod' [root@master ~]# kubectl logs nginx-dbddb74b8-5s6h7 '//再次在master节点访问日志情况,master02节点同样可以访问' 172.17.26.1 - - [30/Apr/2020:17:38:48 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.29.0" "-"
    Processed: 0.010, SQL: 8