Kubernetes Multi-Node and Load-Balanced K8S Deployment


    Multi-Node Deployment

    // Prerequisite: a working single-master environment (continues from the K8S single-node deployment)

    master01: 192.168.20.10  kube-apiserver kube-controller-manager kube-scheduler etcd
    master02: 192.168.20.40  kube-apiserver kube-controller-manager kube-scheduler
    node1:    192.168.20.20  kubelet kube-proxy docker flannel etcd
    node2:    192.168.20.30  kubelet kube-proxy docker flannel etcd

    master02 Deployment

    [root@localhost ~]# hostnamectl set-hostname master2
    [root@localhost ~]# su

    // First, disable the firewall and SELinux

    [root@master2 ~]# iptables -F
    [root@master2 ~]# setenforce 0

    // On master01:
    // Copy the kubernetes directory to master02

    [root@master ~]# scp -r /opt/kubernetes/ root@192.168.20.40:/opt
    The authenticity of host '192.168.20.40 (192.168.20.40)' can't be established.
    ECDSA key fingerprint is SHA256:v7t4p3JJLUnXziTqE64SOtmKTkJdbSB2hEykd+xG22c.
    ECDSA key fingerprint is MD5:85:90:0a:05:38:e9:e3:37:25:de:f0:08:71:9e:9d:c5.
    Are you sure you want to continue connecting (yes/no)? yes
    Warning: Permanently added '192.168.20.40' (ECDSA) to the list of known hosts.
    root@192.168.20.40's password:
    token.csv                  100%   84     14.3KB/s   00:00
    kube-apiserver             100%  929     96.4KB/s   00:00
    kube-scheduler             100%   94     21.3KB/s   00:00
    kube-controller-manager    100%  483     82.4KB/s   00:00
    kube-apiserver             100%  184MB   20.4MB/s   00:09
    kubectl                    100%   55MB   13.7MB/s   00:04
    kube-controller-manager    100%  155MB   16.9MB/s   00:09
    kube-scheduler             100%   55MB   18.6MB/s   00:02
    ca-key.pem                 100% 1679    341.9KB/s   00:00
    ca.pem                     100% 1359    192.7KB/s   00:00
    server-key.pem             100% 1679    301.5KB/s   00:00
    server.pem                 100% 1643    224.1KB/s   00:00

    // Copy the three component unit files from master01:
    // kube-apiserver.service, kube-controller-manager.service, kube-scheduler.service

    [root@master ~]# scp /usr/lib/systemd/system/{kube-apiserver,kube-controller-manager,kube-scheduler}.service root@192.168.20.40:/usr/lib/systemd/system/
    root@192.168.20.40's password:
    kube-apiserver.service             100%  282   72.7KB/s   00:00
    kube-controller-manager.service    100%  317   55.7KB/s   00:00
    kube-scheduler.service             100%  281   42.4KB/s   00:00

    // On master02:
    // Update the IPs in the kube-apiserver config file

    [root@master2 ~]# cd /opt/kubernetes/cfg/
    [root@master2 cfg]# vim kube-apiserver
    KUBE_APISERVER_OPTS="--logtostderr=true \
    --v=4 \
    --etcd-servers=https://192.168.20.10:2379,https://192.168.20.20:2379,https://192.168.20.30:2379 \
    --bind-address=192.168.20.40 \           // change this to master02's address
    --secure-port=6443 \
    --advertise-address=192.168.20.40 \      // change this to master02's address
    --allow-privileged=true \
    --service-cluster-ip-range=10.0.0.0/24 \
    --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \
    --authorization-mode=RBAC,Node \
    --kubelet-https=true \
    --enable-bootstrap-token-auth \
    --token-auth-file=/opt/kubernetes/cfg/token.csv \
    --service-node-port-range=30000-50000 \
    --tls-cert-file=/opt/kubernetes/ssl/server.pem \
    --tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \
    --client-ca-file=/opt/kubernetes/ssl/ca.pem \
    --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
    --etcd-cafile=/opt/etcd/ssl/ca.pem \
    --etcd-certfile=/opt/etcd/ssl/server.pem \
    --etcd-keyfile=/opt/etcd/ssl/server-key.pem"

    // Important: master02 must have the etcd certificates
    // Copy the existing etcd certificates from master01 to master02
    // On master01:

    [root@master ~]# scp -r /opt/etcd/ root@192.168.20.40:/opt/
    root@192.168.20.40's password:
    etcd              100%  509    57.4KB/s   00:00
    etcd              100%   18MB  19.9MB/s   00:00
    etcdctl           100%   15MB  15.1MB/s   00:01
    ca-key.pem        100% 1675   144.8KB/s   00:00
    ca.pem            100% 1265   371.3KB/s   00:00
    server-key.pem    100% 1679   287.1KB/s   00:00
    server.pem        100% 1338   439.4KB/s   00:00
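    // Quick check on master02 that the certificates landed where the apiserver flags expect
    // them (the four .pem files listed in the transfer above):

    [root@master2 ~]# ls /opt/etcd/ssl/
    ca-key.pem  ca.pem  server-key.pem  server.pem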

    // On master02:
    // Start the three component services

    [root@master2 cfg]# systemctl start kube-apiserver.service
    [root@master2 cfg]# systemctl start kube-controller-manager.service
    [root@master2 cfg]# systemctl start kube-scheduler.service
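    // Optional sanity checks: enable the services at boot and confirm the secure port is listening

    [root@master2 cfg]# systemctl enable kube-apiserver.service kube-controller-manager.service kube-scheduler.service
    [root@master2 cfg]# ss -lntp | grep 6443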

    // Add the environment variable

    [root@master2 cfg]# vim /etc/profile
    # append at the end of the file
    export PATH=$PATH:/opt/kubernetes/bin/

    [root@master2 cfg]# source /etc/profile
    [root@master2 cfg]# kubectl get node
    NAME            STATUS   ROLES    AGE     VERSION
    192.168.20.20   Ready    <none>   7d22h   v1.12.3
    192.168.20.30   Ready    <none>   7d21h   v1.12.3
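    // Optionally confirm the control-plane components on master02 report healthy:

    [root@master2 cfg]# kubectl get cs
    # scheduler, controller-manager and the three etcd members should all show Healthy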

    Load Balancer Deployment

    // Prerequisite: the multi-master environment above (continues from the K8S multi-node deployment)
    master01: 192.168.20.10  kube-apiserver kube-controller-manager kube-scheduler etcd
    master02: 192.168.20.40  kube-apiserver kube-controller-manager kube-scheduler
    node1:    192.168.20.20  kubelet kube-proxy docker flannel etcd
    node2:    192.168.20.30  kubelet kube-proxy docker flannel etcd
    VIP (floating address): 192.168.20.111
    Load balancers:
    lb01 (master): 192.168.20.50
    lb02 (backup): 192.168.20.60

    // On lb01 and lb02
    // lb01 (192.168.20.50):

    [root@localhost ~]# hostnamectl set-hostname lb01
    [root@localhost ~]# su

    // lb02 (192.168.20.60):

    [root@localhost ~]# hostnamectl set-hostname lb02
    [root@localhost ~]# su

    // First, disable the firewall and SELinux on lb01 and lb02

    systemctl stop firewalld.service
    setenforce 0

    // Install nginx; copy the nginx.sh and keepalived.conf scripts to the home directory

    vim /etc/yum.repos.d/nginx.repo
    [nginx]
    name=nginx repo
    baseurl=http://nginx.org/packages/centos/7/$basearch/
    gpgcheck=0

    yum install nginx -y

    // Add a layer-4 (stream) proxy between the events {} and http {} blocks
    vim /etc/nginx/nginx.conf
    events {
        worker_connections 1024;
    }

    stream {
        log_format main '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';
        access_log /var/log/nginx/k8s-access.log main;

        upstream k8s-apiserver {
            server 192.168.20.10:6443;    # master01
            server 192.168.20.40:6443;    # master02
        }
        server {
            listen 6443;
            proxy_pass k8s-apiserver;
        }
    }

    http {
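    // Before starting, the configuration can be syntax-checked (the nginx.org package ships
    // with the stream module compiled in):

    nginx -t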

    // Start nginx

    systemctl start nginx
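    // Optionally confirm the stream proxy is listening on the apiserver port:

    ss -lntp | grep 6443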

    // Deploy keepalived

    yum install keepalived -y

    // Replace the configuration file

    cp keepalived.conf /etc/keepalived/keepalived.conf
    cp: overwrite '/etc/keepalived/keepalived.conf'? yes

    // Note: lb01 is the MASTER; its configuration is as follows:

    [root@lb01 ~]# vim /etc/keepalived/keepalived.conf
    ! Configuration File for keepalived

    global_defs {
        # notification recipients
        notification_email {
            acassen@firewall.loc
            failover@firewall.loc
            sysadmin@firewall.loc
        }
        # sender address
        notification_email_from Alexandre.Cassen@firewall.loc
        smtp_server 127.0.0.1
        smtp_connect_timeout 30
        router_id NGINX_MASTER
    }

    vrrp_script check_nginx {
        script "/etc/nginx/check_nginx.sh"
    }

    vrrp_instance VI_1 {
        state MASTER
        interface ens33
        virtual_router_id 51    # VRRP router ID; must be unique per instance
        priority 100            # priority; the backup is set to 90
        advert_int 1            # VRRP advertisement interval, default 1 second
        authentication {
            auth_type PASS
            auth_pass 1111
        }
        virtual_ipaddress {
            192.168.20.111/24
        }
        track_script {
            check_nginx
        }
    }

    // Note: lb02 is the BACKUP; its configuration is as follows:

    [root@lb02 ~]# vim /etc/keepalived/keepalived.conf
    ! Configuration File for keepalived

    global_defs {
        # notification recipients
        notification_email {
            acassen@firewall.loc
            failover@firewall.loc
            sysadmin@firewall.loc
        }
        # sender address
        notification_email_from Alexandre.Cassen@firewall.loc
        smtp_server 127.0.0.1
        smtp_connect_timeout 30
        router_id NGINX_MASTER
    }

    vrrp_script check_nginx {
        script "/etc/nginx/check_nginx.sh"
    }

    vrrp_instance VI_1 {
        state BACKUP
        interface ens33
        virtual_router_id 51    # VRRP router ID; must be unique per instance
        priority 90             # priority; lower than the master's 100
        advert_int 1            # VRRP advertisement interval, default 1 second
        authentication {
            auth_type PASS
            auth_pass 1111
        }
        virtual_ipaddress {
            192.168.20.111/24
        }
        track_script {
            check_nginx
        }
    }

    // On both lb01 and lb02, create the health-check script referenced above, then start keepalived
    vim /etc/nginx/check_nginx.sh
    count=$(ps -ef | grep nginx | egrep -cv "grep|$$")
    if [ "$count" -eq 0 ];then
        systemctl stop keepalived
    fi

    chmod +x /etc/nginx/check_nginx.sh
    systemctl start keepalived

    // Check lb01's addresses

    [root@lb01 ~]# ip a
    1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
        link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
        inet 127.0.0.1/8 scope host lo
           valid_lft forever preferred_lft forever
        inet6 ::1/128 scope host
           valid_lft forever preferred_lft forever
    2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
        link/ether 00:0c:29:7f:10:c4 brd ff:ff:ff:ff:ff:ff
        inet 192.168.20.50/24 brd 192.168.20.255 scope global noprefixroute ens33
           valid_lft forever preferred_lft forever
        inet 192.168.20.111/24 scope global secondary ens33
           valid_lft forever preferred_lft forever
        inet6 fe80::2699:92a4:f2b8:7f88/64 scope link noprefixroute
           valid_lft forever preferred_lft forever
    3: virbr0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default qlen 1000
        link/ether 52:54:00:64:d5:ef brd ff:ff:ff:ff:ff:ff
        inet 192.168.122.1/24 brd 192.168.122.255 scope global virbr0
           valid_lft forever preferred_lft forever
    4: virbr0-nic: <BROADCAST,MULTICAST> mtu 1500 qdisc pfifo_fast master virbr0 state DOWN group default qlen 1000
        link/ether 52:54:00:64:d5:ef brd ff:ff:ff:ff:ff:ff

    // The floating address (VIP) is on lb01

    // Check lb02's addresses

    [root@lb02 ~]# ip a
    1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
        link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
        inet 127.0.0.1/8 scope host lo
           valid_lft forever preferred_lft forever
        inet6 ::1/128 scope host
           valid_lft forever preferred_lft forever
    2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
        link/ether 00:0c:29:6e:8e:cd brd ff:ff:ff:ff:ff:ff
        inet 192.168.20.60/24 brd 192.168.20.255 scope global noprefixroute ens33
           valid_lft forever preferred_lft forever
        inet6 fe80::761e:cabc:c27a:4cd4/64 scope link noprefixroute
           valid_lft forever preferred_lft forever
    3: virbr0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default qlen 1000
        link/ether 52:54:00:c1:04:69 brd ff:ff:ff:ff:ff:ff
        inet 192.168.122.1/24 brd 192.168.122.255 scope global virbr0
           valid_lft forever preferred_lft forever
    4: virbr0-nic: <BROADCAST,MULTICAST> mtu 1500 qdisc pfifo_fast master virbr0 state DOWN group default qlen 1000
        link/ether 52:54:00:c1:04:69 brd ff:ff:ff:ff:ff:ff

    // Verify VIP failover: run pkill nginx on lb01, then check with ip a on lb02
    // Recovery: on lb01, start nginx first, then start keepalived
    // nginx document root: /usr/share/nginx/html
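    // The failover test spelled out (a sketch; output omitted):

    [root@lb01 ~]# pkill nginx                            # check_nginx.sh stops keepalived, releasing the VIP
    [root@lb02 ~]# ip a show ens33 | grep 192.168.20.111  # the VIP should now be on lb02
    [root@lb01 ~]# systemctl start nginx                  # recovery: nginx first...
    [root@lb01 ~]# systemctl start keepalived             # ...then keepalived; with its higher priority, lb01 takes the VIP back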

    // On node01 and node02:
    // Point the node kubeconfigs at the unified VIP (bootstrap.kubeconfig, kubelet.kubeconfig, kube-proxy.kubeconfig)

    vim /opt/kubernetes/cfg/bootstrap.kubeconfig
    vim /opt/kubernetes/cfg/kubelet.kubeconfig
    vim /opt/kubernetes/cfg/kube-proxy.kubeconfig

    // In each file, change the server line to the VIP

    server: https://192.168.20.111:6443

    systemctl restart kubelet.service
    systemctl restart kube-proxy.service
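    // Equivalently, a sed one-liner rewrites all three files at once (assuming they still point
    // at master01, https://192.168.20.10:6443, from the single-node deployment):

    cd /opt/kubernetes/cfg/
    sed -i 's#server: https://192.168.20.10:6443#server: https://192.168.20.111:6443#' bootstrap.kubeconfig kubelet.kubeconfig kube-proxy.kubeconfig
    systemctl restart kubelet.service kube-proxy.service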

    // After the replacement, run a self-check

    cd /opt/kubernetes/cfg/
    grep 111 *

    [root@node1 ~]# cd /opt/kubernetes/cfg/
    [root@node1 cfg]# grep 111 *
    bootstrap.kubeconfig:    server: https://192.168.20.111:6443
    kubelet.kubeconfig:    server: https://192.168.20.111:6443
    kube-proxy.kubeconfig:    server: https://192.168.20.111:6443

    [root@node2 ~]# cd /opt/kubernetes/cfg/
    [root@node2 cfg]# grep 111 *
    bootstrap.kubeconfig:    server: https://192.168.20.111:6443
    kubelet.kubeconfig:    server: https://192.168.20.111:6443
    kube-proxy.kubeconfig:    server: https://192.168.20.111:6443

    // On lb01:
    // Check nginx's k8s access log

    [root@lb01 ~]# tail /var/log/nginx/k8s-access.log
    192.168.20.20 192.168.20.40:6443 - [08/Oct/2020:00:23:11 +0800] 200 1119
    192.168.20.20 192.168.20.40:6443 - [08/Oct/2020:00:23:11 +0800] 200 1118
    192.168.20.30 192.168.20.40:6443 - [08/Oct/2020:00:23:16 +0800] 200 1120
    192.168.20.30 192.168.20.40:6443 - [08/Oct/2020:00:23:16 +0800] 200 1120
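    // nginx balances the two apiservers round-robin by default; if one master should take more
    // traffic, weights can be added to the upstream block shown earlier (a sketch):

    upstream k8s-apiserver {
        server 192.168.20.10:6443 weight=2;    # master01 gets roughly twice the connections
        server 192.168.20.40:6443 weight=1;    # master02
    }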

    // On master01:
    // Test by creating a pod

    kubectl run nginx --image=nginx
    kubectl run --generator=deployment/apps.v1beta1 is DEPRECATED and will be removed in a future version. Use kubectl create instead.
    deployment.apps/nginx created
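    // As the deprecation warning suggests, the same deployment can be created explicitly
    // (use one or the other; both create a deployment named nginx):

    kubectl create deployment nginx --image=nginx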

    // Check the status

    [root@master ~]# kubectl get pods
    NAME                    READY   STATUS              RESTARTS   AGE
    nginx-dbddb74b8-s4rdt   0/1     ContainerCreating   0          33s    // still creating

    [root@master2 cfg]# kubectl get pods
    NAME                    READY   STATUS    RESTARTS   AGE
    nginx-dbddb74b8-s4rdt   1/1     Running   0          80s    // created and running

    // Note the log-access problem

    [root@master ~]# kubectl logs nginx-dbddb74b8-s4rdt
    Error from server (Forbidden): Forbidden (user=system:anonymous, verb=get, resource=nodes, subresource=proxy) ( pods/log nginx-dbddb74b8-s4rdt)

    [root@master ~]# kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=system:anonymous
    clusterrolebinding.rbac.authorization.k8s.io/cluster-system-anonymous created
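    // Binding cluster-admin to system:anonymous works in a lab but grants anonymous users full
    // cluster control. A narrower sketch (role and binding names are illustrative) grants only
    // the access the log request was denied:

    kubectl create clusterrole node-proxy-reader --verb=get --resource=nodes/proxy
    kubectl create clusterrolebinding anonymous-node-proxy --clusterrole=node-proxy-reader --user=system:anonymous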

    // Check the pod's network

    [root@master ~]# kubectl get pods -o wide
    NAME                    READY   STATUS    RESTARTS   AGE     IP            NODE            NOMINATED NODE
    nginx-dbddb74b8-s4rdt   1/1     Running   0          4m18s   172.17.39.3   192.168.20.20   <none>

    // From a node on the matching flannel subnet, the pod can be reached directly

    [root@node1 cfg]# curl 172.17.39.3
    <!DOCTYPE html>
    <html>
    <head>
    <title>Welcome to nginx!</title>
    <style>
        body {
            width: 35em;
            margin: 0 auto;
            font-family: Tahoma, Verdana, Arial, sans-serif;
        }
    </style>
    </head>
    <body>
    <h1>Welcome to nginx!</h1>
    <p>If you see this page, the nginx web server is successfully installed and working. Further configuration is required.</p>
    <p>For online documentation and support please refer to <a href="http://nginx.org/">nginx.org</a>.<br/>
    Commercial support is available at <a href="http://nginx.com/">nginx.com</a>.</p>
    <p><em>Thank you for using nginx.</em></p>
    </body>
    </html>

    // The access produces a log entry
    // Back on master01:

    [root@master ~]# kubectl logs nginx-dbddb74b8-s4rdt
    /docker-entrypoint.sh: /docker-entrypoint.d/ is not empty, will attempt to perform configuration
    /docker-entrypoint.sh: Looking for shell scripts in /docker-entrypoint.d/
    /docker-entrypoint.sh: Launching /docker-entrypoint.d/10-listen-on-ipv6-by-default.sh
    10-listen-on-ipv6-by-default.sh: Getting the checksum of /etc/nginx/conf.d/default.conf
    10-listen-on-ipv6-by-default.sh: Enabled listen on IPv6 in /etc/nginx/conf.d/default.conf
    /docker-entrypoint.sh: Launching /docker-entrypoint.d/20-envsubst-on-templates.sh
    /docker-entrypoint.sh: Configuration complete; ready for start up
    172.17.39.1 - - [07/Oct/2020:08:31:26 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.29.0" "-"