Keepalived+LVS、Nginx 高可用负载均衡器部署
项目简介
项目描述:
- 自动化配置: 通过使用 Ansible 工具,批量配置六台虚拟机,以提升集群部署效率并降低人工操作错误。
- 高可用设计: 部署两台虚拟机,配置 Keepalived 和 LVS,提供虚拟 IP,并将流量发往后端,实现高可用性。主服务器故障时,系统可自动切换节点,确保服务的无缝性。
- 静态资源处理与负载均衡: 部署两台虚拟机,利用 Nginx 处理用户的静态请求,并配置负载均衡,将动态资源请求均匀分配到动态资源处理服务器,从而提高服务响应速度。
- 动态资源处理: 配置两台虚拟机,利用 Apache 专门处理用户的动态请求,确保高效响应。
lvs1部署
网卡:ens33,ens36
外网IP:192.168.118.128 外网VIP:192.168.118.150
内网IP:192.168.140.128 内网VIP:192.168.140.150
# 安装keepalived和ipvsadm
yum install -y keepalived ipvsadm
# 编写keepalived配置文件 /etc/keepalived/keepalived.conf
! Configuration File for keepalived -- lvs1 (MASTER director)
global_defs {
    ! Node identifier; must be unique per director (peer uses LVS_02)
    lvs_id LVS_01
}

! Fail over both VRRP instances together so the external VIP and the
! internal gateway VIP always sit on the same director
vrrp_sync_group VG1 {
    group {
        VI_1
        VI_GATEWAY
    }
}

! External (client-facing) VIP on ens33
vrrp_instance VI_1 {
    state MASTER
    interface ens33
    ! fixed typo: original read "lvs_sync_daemon_inteface", which keepalived
    ! does not recognize, so connection-state sync was silently unconfigured
    lvs_sync_daemon_interface ens33
    virtual_router_id 51
    priority 150
    advert_int 3
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.118.150
    }
}

! Internal gateway VIP (default gateway of the backend network) on ens36
vrrp_instance VI_GATEWAY {
    state MASTER
    interface ens36
    lvs_sync_daemon_interface ens36
    virtual_router_id 52
    priority 150
    advert_int 3
    authentication {
        auth_type PASS
        auth_pass example
    }
    virtual_ipaddress {
        192.168.140.150
    }
}

! NAT virtual service on the external VIP, round-robin across the two nginx LBs
virtual_server 192.168.118.150 80 {
    delay_loop 6
    lb_algo rr
    lb_kind NAT
    nat_mask 255.255.255.0
    protocol TCP
    real_server 192.168.140.130 80 {
        weight 1
        TCP_CHECK {
            connect_timeout 10
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 192.168.140.131 80 {
        weight 1
        TCP_CHECK {
            connect_timeout 10
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}
# Enable IPv4 forwarding so the director can NAT between ens33 and ens36
echo "net.ipv4.ip_forward = 1" >> /etc/sysctl.conf
sysctl -p
# Open port 80 and set SELinux to permissive
firewall-cmd --zone=public --add-port=80/tcp --permanent
firewall-cmd --reload   # --permanent only writes config; reload applies it to the running firewall
setenforce 0            # temporary (until reboot); edit /etc/selinux/config to persist
# Start keepalived and inspect the LVS table
systemctl start keepalived
systemctl enable keepalived
ipvsadm -Ln
lvs2部署
网卡:ens33,ens36
外网IP:192.168.118.129 外网VIP:192.168.118.150
内网IP:192.168.140.129 内网VIP:192.168.140.150
# 安装keepalived和ipvsadm
yum install -y keepalived ipvsadm
# 编写keepalived配置文件 /etc/keepalived/keepalived.conf
! Configuration File for keepalived -- lvs2 (BACKUP director)
global_defs {
    ! fixed: original reused LVS_01; the node identifier must be unique per director
    lvs_id LVS_02
}

! Fail over both VRRP instances together so the external VIP and the
! internal gateway VIP always sit on the same director
vrrp_sync_group VG1 {
    group {
        VI_1
        VI_GATEWAY
    }
}

! External (client-facing) VIP on ens33
vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    ! fixed typo: original read "lvs_sync_daemon_inteface", which keepalived
    ! does not recognize, so connection-state sync was silently unconfigured
    lvs_sync_daemon_interface ens33
    virtual_router_id 51
    priority 100
    advert_int 3
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.118.150
    }
}

! Internal gateway VIP (default gateway of the backend network) on ens36
vrrp_instance VI_GATEWAY {
    state BACKUP
    interface ens36
    lvs_sync_daemon_interface ens36
    virtual_router_id 52
    priority 100
    advert_int 3
    authentication {
        auth_type PASS
        auth_pass example
    }
    virtual_ipaddress {
        192.168.140.150
    }
}

! NAT virtual service on the external VIP, round-robin across the two nginx LBs
virtual_server 192.168.118.150 80 {
    delay_loop 6
    lb_algo rr
    lb_kind NAT
    nat_mask 255.255.255.0
    protocol TCP
    real_server 192.168.140.130 80 {
        weight 1
        TCP_CHECK {
            connect_timeout 10
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 192.168.140.131 80 {
        weight 1
        TCP_CHECK {
            connect_timeout 10
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}
# Enable IPv4 forwarding so the director can NAT between ens33 and ens36
echo "net.ipv4.ip_forward = 1" >> /etc/sysctl.conf
sysctl -p
# Open port 80 and set SELinux to permissive
firewall-cmd --zone=public --add-port=80/tcp --permanent
firewall-cmd --reload   # --permanent only writes config; reload applies it to the running firewall
setenforce 0            # temporary (until reboot); edit /etc/selinux/config to persist
# Start keepalived and inspect the LVS table
systemctl start keepalived
systemctl enable keepalived
ipvsadm -Ln
nginx1部署
网卡:ens33
内网IP:192.168.140.130
# Install nginx (from EPEL), open port 80 and set SELinux to permissive
yum install -y epel-release
yum install -y nginx
firewall-cmd --zone=public --add-port=80/tcp --permanent
firewall-cmd --reload   # apply the permanent rule to the running firewall
setenforce 0
# 配置静态请求处理和负载均衡
http {
    # Upstream pool of dynamic-content (Apache) backends.
    # FIX: the original listed 192.168.140.131/132 — 131 is the second Nginx
    # load balancer, not an Apache server, so dynamic requests were proxied
    # back into the LB tier. The Apache backends in this document are
    # 192.168.140.132 (apache1) and 192.168.140.133 (apache2).
    upstream backend {
        server 192.168.140.132 weight=1;
        server 192.168.140.133 weight=1;
    }
    server {
        listen 80;
        server_name 192.168.118.150;
        # Serve static files directly from this host
        location /static {
            root /var/www/html; # static file directory
        }
        # Everything else is a dynamic request: forward to the Apache pool
        location / {
            proxy_pass http://backend;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
        }
    }
}
# Point the default gateway at the LVS internal VIP so replies return through the director
nmcli con mod ens33 ipv4.addresses 192.168.140.130/24 ipv4.gateway 192.168.140.150 ipv4.dns 114.114.114.114 ipv4.method manual
nmcli con up ens33   # re-activate the connection so the modified settings take effect
# Start nginx
systemctl start nginx
systemctl enable nginx
nginx2部署
网卡:ens33
内网IP:192.168.140.131
# Install nginx (from EPEL), open port 80 and set SELinux to permissive
yum install -y epel-release
yum install -y nginx
firewall-cmd --zone=public --add-port=80/tcp --permanent
firewall-cmd --reload   # apply the permanent rule to the running firewall
setenforce 0
# 配置静态请求处理和负载均衡
http {
    # Upstream pool of dynamic-content (Apache) backends.
    # FIX: the original listed 192.168.140.131/132 — 131 is this host itself,
    # which would make nginx2 proxy requests to its own listener in a loop.
    # The Apache backends in this document are 192.168.140.132 (apache1)
    # and 192.168.140.133 (apache2).
    upstream backend {
        server 192.168.140.132 weight=1;
        server 192.168.140.133 weight=1;
    }
    server {
        listen 80;
        server_name 192.168.118.150;
        # Serve static files directly from this host
        location /static {
            root /var/www/html; # static file directory
        }
        # Everything else is a dynamic request: forward to the Apache pool
        location / {
            proxy_pass http://backend;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
        }
    }
}
# Point the default gateway at the LVS internal VIP so replies return through the director
nmcli con mod ens33 ipv4.addresses 192.168.140.131/24 ipv4.gateway 192.168.140.150 ipv4.dns 114.114.114.114 ipv4.method manual
nmcli con up ens33   # re-activate the connection so the modified settings take effect
# Start nginx
systemctl start nginx
systemctl enable nginx
apache1部署
网卡:ens33
内网IP:192.168.140.132
# Install httpd and the php71w package set
yum install -y httpd
# fixed: original ran "php71w-commonphp71w-mysqlnd" (missing space between packages)
yum -y install php71w php71w-cli php71w-common php71w-mysqlnd
# Write a test page that identifies this backend
echo "this is server1">/var/www/html/index.html
# Open port 80 and set SELinux to permissive
firewall-cmd --zone=public --add-port=80/tcp --permanent
firewall-cmd --reload   # apply the permanent rule to the running firewall
setenforce 0
# Start httpd
systemctl start httpd
systemctl enable httpd
apache2部署
网卡:ens33
内网IP:192.168.140.133
# Install httpd and the php71w package set
yum install -y httpd
# fixed: original ran "php71w-commonphp71w-mysqlnd" (missing space between packages)
yum -y install php71w php71w-cli php71w-common php71w-mysqlnd
# Write a test page that identifies this backend
echo "this is server2">/var/www/html/index.html
# Open port 80 and set SELinux to permissive
firewall-cmd --zone=public --add-port=80/tcp --permanent
firewall-cmd --reload   # apply the permanent rule to the running firewall
setenforce 0
# Start httpd
systemctl start httpd
systemctl enable httpd
效果
[root@localhost ~]# for i in {1..10};do curl 192.168.118.150;done;
this is server2
this is server1
this is server1
this is server2
this is server1
this is server2
this is server1
this is server2
this is server2
this is server2
[root@localhost ~]#
ansible自动化部署
# Map host names to IPs so Ansible can manage hosts by name
vim /etc/hosts
192.168.118.128 server1
192.168.118.129 server2
192.168.140.130 server3
192.168.140.131 server4
192.168.140.132 server5
192.168.140.133 server6
# Generate an SSH key pair (empty passphrase) for password-less Ansible access
ssh-keygen -P "" -t rsa
# Push the public key to every managed host
ssh-copy-id -i /root/.ssh/id_rsa.pub root@server1
ssh-copy-id -i /root/.ssh/id_rsa.pub root@server2
ssh-copy-id -i /root/.ssh/id_rsa.pub root@server3
ssh-copy-id -i /root/.ssh/id_rsa.pub root@server4
ssh-copy-id -i /root/.ssh/id_rsa.pub root@server5
ssh-copy-id -i /root/.ssh/id_rsa.pub root@server6
# 配置主机清单 /etc/ansible/hosts
vim /etc/ansible/hosts
# Static inventory for the deployment playbook.
# server1/2 = Keepalived+LVS directors, server3/4 = Nginx load balancers,
# server5/6 = Apache application servers.
# NOTE(review): server6 must resolve from the control node — verify it is
# present in /etc/hosts alongside the other five entries.
[all_servers]
server1
server2
server3
server4
server5
server6
# Directors get different keepalived.conf files, hence one group each
[keepalived_lvs_1]
server1
[keepalived_lvs_2]
server2
# Shared nginx install/config tasks
[nginx_loadbalancers]
server3
server4
# Per-host network (IP/gateway) configuration
[nginx_loadbalancer_1]
server3
[nginx_loadbalancer_2]
server4
# Shared httpd/php install tasks
[app_servers]
server5
server6
# Per-host index.html content
[app_server_1]
server5
[app_server_2]
server6
# 编写playbook
---
# Cluster provisioning playbook.
# Fixes over the original:
#   * firewall-cmd --reload added so the permanent rule takes effect immediately
#   * ip_forward set via the sysctl module (idempotent and applied at once,
#     instead of appending to /etc/sysctl.conf without ever running `sysctl -p`)
#   * epel-release installed before nginx (nginx is in EPEL on CentOS 7)
#   * canonical `true` booleans instead of `yes`
- name: Close firewalld and selinux
  hosts: all_servers
  remote_user: root
  tasks:
    - name: open port=80
      # --permanent only writes config; --reload applies it to the running firewall
      shell: "firewall-cmd --zone=public --add-port=80/tcp --permanent && firewall-cmd --reload"
    - name: close selinux
      # takes full effect after reboot; run `setenforce 0` for the current boot
      selinux:
        policy: targeted
        state: disabled

- name: Configuration keepalived+lvs_1
  hosts: keepalived_lvs_1
  remote_user: root
  tasks:
    - name: install keepalived+lvs
      yum:
        name:
          - keepalived
          - ipvsadm
        state: latest
    - name: keepalived_config
      copy:
        src: /root/keepalived1.conf
        dest: /etc/keepalived/keepalived.conf
    - name: Start keepalived
      service:
        name: keepalived
        state: started
        enabled: true
    - name: nat_config
      # enable NAT forwarding persistently and apply it immediately
      sysctl:
        name: net.ipv4.ip_forward
        value: "1"
        state: present
        sysctl_set: true
        reload: true

- name: Configuration keepalived+lvs_2
  hosts: keepalived_lvs_2
  remote_user: root
  tasks:
    - name: install keepalived+lvs
      yum:
        name:
          - keepalived
          - ipvsadm
        state: latest
    - name: keepalived_config
      copy:
        src: /root/keepalived2.conf
        dest: /etc/keepalived/keepalived.conf
    - name: Start keepalived
      service:
        name: keepalived
        state: started
        enabled: true
    - name: nat_config
      sysctl:
        name: net.ipv4.ip_forward
        value: "1"
        state: present
        sysctl_set: true
        reload: true

- name: Install nginx
  hosts: nginx_loadbalancers
  remote_user: root
  tasks:
    - name: install epel-release
      # nginx lives in EPEL; the manual steps installed this first as well
      yum:
        name: epel-release
        state: present
    - name: install nginx
      yum:
        name: nginx
        state: latest
    - name: nginx_config
      copy:
        src: /root/nginx.conf
        dest: /etc/nginx/nginx.conf
    - name: start nginx
      service:
        name: nginx
        state: started
        enabled: true

- name: Configuration nginx_1
  hosts: nginx_loadbalancer_1
  remote_user: root
  tasks:
    - name: network_config
      # gateway = LVS internal VIP; `nmcli con up` re-activates so changes apply
      shell: "nmcli con mod ens33 ipv4.addresses 192.168.140.130/24 ipv4.gateway 192.168.140.150 ipv4.dns 114.114.114.114 ipv4.method manual && nmcli con up ens33"

- name: Configuration nginx_2
  hosts: nginx_loadbalancer_2
  remote_user: root
  tasks:
    - name: network_config
      shell: "nmcli con mod ens33 ipv4.addresses 192.168.140.131/24 ipv4.gateway 192.168.140.150 ipv4.dns 114.114.114.114 ipv4.method manual && nmcli con up ens33"

- name: Install httpd php71w
  hosts: app_servers
  remote_user: root
  tasks:
    - name: install httpd php71w
      yum:
        name:
          - httpd
          - php71w
          - php71w-cli
          - php71w-common
          - php71w-mysqlnd
        state: present
    - name: start httpd
      service:
        name: httpd
        state: started
        enabled: true

- name: Configuration server1
  hosts: app_server_1
  remote_user: root
  tasks:
    - name: html_index
      shell: "echo 'this is server1'>/var/www/html/index.html"

- name: Configuration server2
  hosts: app_server_2
  remote_user: root
  tasks:
    - name: html_index
      shell: "echo 'this is server2'>/var/www/html/index.html"
评论区