1.redis master-slave同步详细过程
-
Redis 主从复制分为全量同步和增量同步,首次同步是全量同步。主从同步可以让从服务器从主服务器备份数据,而且从服务器还可以有自己的从服务器,即另外一台 redis 服务器可以从一台从服务器进行数据同步。redis 的主从同步是非阻塞的:master 收到从服务器的 psync(2.8 版本之前是 SYNC)命令后会 fork 一个子进程在后台执行 bgsave 命令,并将此后新写入的数据写入到一个缓冲区里面;bgsave 执行完成之后,将生成的 RDB 文件发送给从服务器,从服务器将收到的 RDB 文件载入自己的内存,然后主 redis 将缓冲区的内容再全部发送给从 redis。之后的同步,从服务器会发送一个 offset 位置(等同于 MySQL 的 binlog 的位置)给主服务器,主服务器检查该位置没有错误后,将此位置之后的数据(包括写在缓冲区的积压数据)发送给 redis 从服务器,从服务器将主服务器发送的积压数据写入内存,这样就完成一次完整的数据同步。再之后同步的时候,从服务器只要发送当前的 offset 位置给主服务器,主服务器根据相应的位置将之后的数据发送给从服务器保存到其内存即可。
-
Redis 全量复制一般发生在 Slave 初始化阶段,这时 Slave 需要将 Master 上的所有数据都复制一份。具
体步骤如下:1)从服务器连接主服务器,发送 SYNC 命令;
2)主服务器接收到 SYNC 命令后,开始执行 BGSAVE 命令生成 RDB 快照文件并使用缓冲区记录此后执行的所有写命令;
3)主服务器 BGSAVE 执行完后,向所有从服务器发送快照文件,并在发送期间继续记录被执行的写命令;
4)从服务器收到快照文件后丢弃所有旧数据,载入收到的快照;
5)主服务器快照发送完毕后开始向从服务器发送缓冲区中的写命令;
6)从服务器完成对快照的载入,开始接收命令请求,并执行来自主服务器缓冲区的写命令;
7) 后期同步会先发送自己 slave_repl_offset 位置, 只同步新增加的数据, 不再全量同步。
2.实现master-slave主从同步
#由于epel源自带的redis是3,现需要装高版本redis
#安装centos官方提供的sclo_rh库
yum install https://cbs.centos.org/kojifiles/packages/centos-release-scl-rh/2/3.el7.centos/noarch/centos-release-scl-rh-2-3.el7.centos.noarch.rpm -y
yum install rh-redis5-redis -y
ln -sv /opt/rh/rh-redis5/root/usr/bin/* /usr/bin/
#编译安装
groupadd -g 980 redis
useradd -r -s /sbin/nologin -u 986 -g 980 redis
cd /usr/local/src
tar xf redis-5.0.8.tar.gz
cd redis-5.0.8
make PREFIX=/apps/redis install
mkdir /apps/redis/etc
cp redis.conf /apps/redis/etc/
chown -R redis.redis /apps/redis/
ln -sv /apps/redis/bin/redis-* /usr/bin/
redis-server /apps/redis/etc/redis.conf #前台启动
#创建服务启动文件
vim /usr/lib/systemd/system/redis.service
[Unit]
Description=Redis persistent key-value database
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
ExecStart=/apps/redis/bin/redis-server /apps/redis/etc/redis.conf --supervised systemd
ExecReload=/bin/kill -s HUP $MAINPID
ExecStop=/bin/kill -s QUIT $MAINPID
Type=notify
User=root
Group=root
RuntimeDirectory=redis
RuntimeDirectoryMode=0755
[Install]
WantedBy=multi-user.target
#解决启动时的三个警告
redis-server /apps/redis/etc/redis.conf #前台启动看报错
vim /etc/sysctl.conf
net.core.somaxconn = 512 # WARNING: The TCP backlog setting of 511
vm.overcommit_memory = 1 # WARNING overcommit_memory is set to 0!
sysctl -p
#WARNING you have Transparent Huge Pages (THP) support enabled in your kernel
echo never > /sys/kernel/mm/transparent_hugepage/enabled #关闭透明大页(THP)的动态分配,让 redis 自行负责内存管理
echo 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' >> /etc/rc.d/rc.local
chmod +x /etc/rc.d/rc.local
systemctl start redis
#主从配置
#master
redis-cli
127.0.0.1:6379> flushall
OK
vim /apps/redis/etc/redis.conf
bind 127.0.0.1 192.168.37.7
requirepass 123456
logfile "/apps/redis/logs/redis-6379.log"
mkdir /apps/redis/logs
systemctl restart redis
scp /usr/lib/systemd/system/redis.service 192.168.37.17:/usr/lib/systemd/system/
scp -r /apps/redis 192.168.37.17:/apps/
#slave
ln -sv /apps/redis/bin/* /usr/bin/
vim /apps/redis/etc/redis.conf
bind 127.0.0.1 192.168.37.17
logfile "/apps/redis/logs/redis-6379.log"
mkdir /apps/redis/logs
systemctl restart redis
#手动同步方法
redis-cli
auth 123456
slaveof 192.168.37.7 6379
config set masterauth 123456
127.0.0.1:6379> info replication
# Replication
role:slave
master_host:192.168.37.7
master_port:6379
master_link_status:up
master_last_io_seconds_ago:3
master_sync_in_progress:0
slave_repl_offset:42
slave_priority:100
slave_read_only:1
connected_slaves:0
master_replid:37b9d665ee1e718742c312c69919f6937f54e91b
master_replid2:0000000000000000000000000000000000000000
master_repl_offset:42
second_repl_offset:-1
repl_backlog_active:1
repl_backlog_size:1048576
repl_backlog_first_byte_offset:1
repl_backlog_histlen:42
#从节点自动同步
vim /apps/redis/etc/redis.conf
bind 127.0.0.1 192.168.37.27
logfile "/apps/redis/logs/redis-6379.log"
replicaof 192.168.37.7 6379 #slaveof=replicaof
masterauth 123456
systemctl restart redis
#性能优化
vim /apps/redis/etc/redis.conf
#no 为 master 将 RDB 文件先保存到磁盘再发送给 slave(默认); yes 为无盘复制, master 不落盘, 直接通过 socket 将 RDB 数据发送给 slave, RDB 文件不需要与磁盘交互。
repl-diskless-sync yes
#Master 开始无盘传输前的等待时间(秒), 以便让更多 slave 一起加入本次传输
repl-diskless-sync-delay 5
#slave 端向 master 端发送 ping 的时间间隔设置,默认为 10 秒
repl-ping-slave-period 10
#设置超时时间
repl-timeout 60
#是否启用 TCP_NODELAY, 如设置成 yes,则 redis 会合并小的 TCP 包从而节省带宽,但会增加同步延迟(40ms),造成 master 与 slave 数据不一致,假如设置成 no,则 redis master会立即发送同步数据,没有延迟,前者关注性能,后者关注一致性
repl-disable-tcp-nodelay no
#master 的写入数据缓冲区, 用于记录自上一次同步后到下一次同步过程中间的写入命令,计算公式: repl-backlog-size = 允许从节点最大中断时长 * 主实例 offset 每秒写入量, 比如 master 每秒最大写入 64mb, 最大允许中断 60 秒,那么就要设置为 64mb*60 秒=3840mb(3.8G)
repl-backlog-size 1mb
#如果一段时间后没有 slave 连接到 master,则 backlog size 的内存将会被释放。如果值为 0 则表示永远不释放这部份内存。
repl-backlog-ttl 3600
#slave 端的优先级设置,值是一个整数,数字越小表示优先级越高。当 master 故障时将会按照优先级来选择 slave 端进行恢复,如果值设置为 0,则表示该 slave 永远不会被选择。
replica-priority 100
#min-slaves-to-write 1 #当可用(健康)的 slave 数量少于该值时, master 拒绝写入
#min-slaves-max-lag 20 #设置所有slave延迟时间都大于多少秒时,master不接收写操作(拒绝写入)
3.redis 哨兵机制及实现
#哨兵可以跟redis不在同一服务器
#环境:3台机器:master(兼sentinel):192.168.37.67 slave1:192.168.37.17 slave2:192.168.37.27
#先还原配置
#ha1、ha2
cp -p /apps/redis/etc/redis.conf.bak /apps/redis/etc/redis.conf
#master-sentinel
vim /apps/redis/etc/redis.conf
bind 127.0.0.1 192.168.37.67
logfile "/apps/redis/logs/redis-6379.log"
requirepass 123456
cp /usr/local/src/redis-5.0.8/sentinel.conf /apps/redis/etc/
vim /apps/redis/etc/sentinel.conf
bind 0.0.0.0
port 26379
daemonize yes
pidfile "/var/run/redis-sentinel.pid"
logfile "sentinel-6379.log"
dir "/apps/redis/logs"
sentinel monitor master1 192.168.37.67 6379 2 #最后的2是quorum:判定master客观下线所需的最少sentinel数量,并非redis-server总数
sentinel auth-pass master1 123456
sentinel down-after-milliseconds master1 5000
sentinel parallel-syncs master1 1
sentinel failover-timeout master1 2000
sentinel deny-scripts-reconfig yes
#sentinel服务启动文件
vim /usr/lib/systemd/system/redis-sentinel.service
[Unit]
Description=Redis persistent key-value database
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
ExecStart=/apps/redis/bin/redis-sentinel /apps/redis/etc/sentinel.conf --supervised systemd
ExecReload=/bin/kill -s HUP $MAINPID
ExecStop=/bin/kill -s QUIT $MAINPID
Type=notify
User=root
Group=root
RuntimeDirectory=redis
RuntimeDirectoryMode=0755
[Install]
WantedBy=multi-user.target
#启动服务
systemctl start redis redis-sentinel
scp /apps/redis/etc/sentinel.conf 192.168.37.7:/apps/redis/etc/
scp /apps/redis/etc/sentinel.conf 192.168.37.17:/apps/redis/etc/
scp /usr/lib/systemd/system/redis-sentinel.service 192.168.37.7:/usr/lib/systemd/system/
scp /usr/lib/systemd/system/redis-sentinel.service 192.168.37.17:/usr/lib/systemd/system/
#slave1
vim /apps/redis/etc/redis.conf
bind 127.0.0.1 192.168.37.17
logfile "/apps/redis/logs/redis-6379.log"
replicaof 192.168.37.67 6379
masterauth 123456
sentinel monitor master1 192.168.37.67 6379 2 #写入sentinel.conf;最后的2是quorum:判定master客观下线所需的最少sentinel数量
sentinel auth-pass master1 123456
sentinel deny-scripts-reconfig yes
systemctl restart redis redis-sentinel
#slave2
vim /apps/redis/etc/redis.conf
bind 127.0.0.1 192.168.37.27
logfile "/apps/redis/logs/redis-6379.log"
replicaof 192.168.37.67 6379
masterauth 123456
sentinel monitor master1 192.168.37.67 6379 2 #写入sentinel.conf;最后的2是quorum:判定master客观下线所需的最少sentinel数量
sentinel auth-pass master1 123456
sentinel deny-scripts-reconfig yes
systemctl restart redis redis-sentinel
#slave1查看故障信息
tail -f /apps/redis/logs/sentinel-6379.log
redis-cli -h 192.168.37.67 -p 26379
info sentinel
#master模拟故障
systemctl stop redis
4.redis cluster机制及实现
Redis 分布式部署方案
1) 客户端分区: 由客户端程序决定 key 的分配和写入的 redis node, 但是需要客户端自己处理写入分配、高可用管理和故障转移等
2)代理方案: 基于三方软件实现 redis proxy,客户端先连接至代理层,由代理层实现 key 的写入分配,对客户端来说比较简单,但是集群节点的增减管理相对比较麻烦,而且代理本身也是单点和性能瓶颈。
在哨兵 sentinel 机制中,可以解决 redis 高可用的问题, 即当 master 故障后可以自动将 slave 提升为 master,从而可以保证 redis 服务的正常使用,但是无法解决 redis 单机写入的瓶颈问题, 即单机的 redis 写入性能受限于单机的内存大小、 并发数量、 网卡速率等因素,因此 redis 官方在 redis 3.0 版本之后推出了无中心架构的 redis cluster 机制, 在无中心的 redis 集群中,每个节点保存当前节点数据和整个集群状态,每个节点都和其他所有节点连接, 特点如下:
1: 所有 Redis 节点使用(PING 机制)互联
2:集群中某个节点的失效, 是整个集群中超过半数的节点监测都失效才算真正的失效
3: 客户端不需要 proxy 即可直接连接 redis, 应用程序需要写全部的 redis 服务器 IP
4: redis cluster 把所有的 redis node 映射到 0-16383 个槽位(slot)上, 读写需要到指定的 redis node 上进行操作,因此有多少个 redis node 相当于 redis 并发扩展了多少倍
5: Redis cluster 预先分配 16384 个(slot)槽位,当需要在 redis 集群中写入一个 key -value 的时候,会使用 CRC16(key) mod 16384 之后的值,决定将 key 写入值哪一个槽位从而决定写入哪一个 Redis 节点上, 从而有效解决单机瓶颈
#环境:6台主机配置集群,192.168.37.7 192.168.37.17 192.168.37.27 192.168.37.37 192.168.37.47 192.168.37.57
#先把哨兵还原
mv /apps/redis/etc/sentinel.conf /apps/redis/etc/sentinel.conf.bak
cp /apps/redis/etc/redis.conf.bak /apps/redis/etc/redis.conf
rm -rf /apps/redis/logs/*
#redis1程序文件打包并传送到其他服务器
mkdir /apps/redis/snapshot
tar -zcf redis_5.0.8.tar.gz /apps/redis
scp redis_5.0.8.tar.gz root@192.168.37.17:/root
scp redis_5.0.8.tar.gz root@192.168.37.27:/root
scp redis_5.0.8.tar.gz root@192.168.37.37:/root
scp redis_5.0.8.tar.gz root@192.168.37.47:/root
scp redis_5.0.8.tar.gz root@192.168.37.57:/root
scp /usr/lib/systemd/system/redis.service root@192.168.37.17:/usr/lib/systemd/system/
scp /usr/lib/systemd/system/redis.service root@192.168.37.27:/usr/lib/systemd/system/
scp /usr/lib/systemd/system/redis.service root@192.168.37.37:/usr/lib/systemd/system/
scp /usr/lib/systemd/system/redis.service root@192.168.37.47:/usr/lib/systemd/system/
scp /usr/lib/systemd/system/redis.service root@192.168.37.57:/usr/lib/systemd/system/
#redis2~6解包redis程序
tar -xf redis_5.0.8.tar.gz -C /
ln -sv /apps/redis/bin/redis-* /usr/bin
systemctl daemon-reload
#集群
#redis1
mkdir /apps/redis/snapshot
vim /apps/redis/etc/redis.conf
bind 0.0.0.0
pidfile /var/run/redis_6379.pid
logfile "/apps/redis/logs/redis-6379.log"
requirepass 123456
cluster-enabled yes #开启集群
cluster-config-file nodes-6379.conf
masterauth 123456
#快照
stop-writes-on-bgsave-error no #快照出错时是否禁止 redis 写入操作
dbfilename dump.rdb #快照文件名
dir /apps/redis/snapshot #快照目录
scp /apps/redis/etc/redis.conf root@192.168.37.17:/apps/redis/etc/
scp /apps/redis/etc/redis.conf root@192.168.37.27:/apps/redis/etc/
scp /apps/redis/etc/redis.conf root@192.168.37.37:/apps/redis/etc/
scp /apps/redis/etc/redis.conf root@192.168.37.47:/apps/redis/etc/
scp /apps/redis/etc/redis.conf root@192.168.37.57:/apps/redis/etc/
#redis2~6
vim /etc/sysctl.conf
net.core.somaxconn = 512
vm.overcommit_memory = 1
sysctl -p
echo 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' >> /etc/rc.d/rc.local
chmod +x /etc/rc.d/rc.local
systemctl start redis
#创建集群,所有机器都启动服务,并保证没有数据,在任意一台redis机器上执行下面命令即可完成集群
redis-cli -a 123456 --cluster create 192.168.37.7:6379 192.168.37.17:6379 192.168.37.27:6379 192.168.37.37:6379 192.168.37.47:6379 192.168.37.57:6379 --cluster-replicas 1
Can I set the above configuration? (type 'yes' to accept): yes
#查看集群
redis-cli -h 192.168.37.7 -p 6379
cluster info
cluster nodes
#验证数据写入
redis-cli -a 123456 -h 192.168.37.7 -p 6379
192.168.37.7:6379> set name mage
(error) MOVED 5798 192.168.37.17:6379
redis-cli -a 123456 -h 192.168.37.17 -p 6379
192.168.37.17:6379> set name1 mage
(error) MOVED 12933 192.168.37.27:6379
redis-cli -a 123456 -h 192.168.37.27 -p 6379
192.168.37.27:6379> set name1 mage
OK
192.168.37.27:6379> keys *
1) "name1"
192.168.37.17:6379> keys *
(empty list or set)
192.168.37.7:6379> keys *
(empty list or set)
#集群状态
# redis-cli -a 123456 --cluster check 192.168.37.7:6379
Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe.
192.168.37.7:6379 (a4e1678a...) -> 0 keys | 5461 slots | 1 slaves.
192.168.37.27:6379 (fd16b75c...) -> 1 keys | 5461 slots | 1 slaves.
192.168.37.17:6379 (5f0b0e17...) -> 0 keys | 5462 slots | 1 slaves.
[OK] 1 keys in 3 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.37.7:6379)
M: a4e1678a089fd5c0c6ef6dff10cb3db9c66db07b 192.168.37.7:6379
slots:[0-5460] (5461 slots) master
1 additional replica(s)
S: f033ca316f8f083eeafc6ecf81dee6b943f6c25e 192.168.37.47:6379
slots: (0 slots) slave
replicates a4e1678a089fd5c0c6ef6dff10cb3db9c66db07b
M: fd16b75cdf1a05843406247587414d32c52dbb91 192.168.37.27:6379
slots:[10923-16383] (5461 slots) master
1 additional replica(s)
S: bca894d6f69a96d223f5ede02cd0f96f93e189e2 192.168.37.37:6379
slots: (0 slots) slave
replicates fd16b75cdf1a05843406247587414d32c52dbb91
M: 5f0b0e17db7e32a59ba719c80c96072355ffa9ec 192.168.37.17:6379
slots:[5461-10922] (5462 slots) master
1 additional replica(s)
S: f18f4b7f4e52a0d624f4c4d9c54e33ef45c04d35 192.168.37.57:6379
slots: (0 slots) slave
replicates 5f0b0e17db7e32a59ba719c80c96072355ffa9ec
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
#集群管理
1、添加节点
#在57上创建一个redis-6380模拟新机器
cp /apps/redis/etc/redis{.conf.bak,_6380.conf}
vim /apps/redis/etc/redis_6380.conf
bind 192.168.37.57
port 6380
pidfile /var/run/redis_6380.pid
logfile "/apps/redis/logs/redis-6380.log"
requirepass 123456
cluster-enabled yes #开启集群
cluster-config-file nodes-6380.conf
masterauth 123456
#快照
stop-writes-on-bgsave-error no #快照出错时是否禁止 redis 写入操作
dbfilename dump.rdb #快照文件名
dir /apps/redis/snapshot_6380 #快照目录
mkdir /apps/redis/snapshot_6380
redis-server /apps/redis/etc/redis_6380.conf &
#添加节点到集群
redis-cli -a 123456 --cluster add-node 192.168.37.57:6380 192.168.37.7:6379
#分配槽位,分配后为master
redis-cli -a 123456 --cluster reshard 192.168.37.57:6380
>>> Performing Cluster Check (using node 192.168.37.57:6380)
M: ab666139a5979c029f7255a29cc0049e39b87af9 192.168.37.57:6380
slots: (0 slots) master
S: f18f4b7f4e52a0d624f4c4d9c54e33ef45c04d35 192.168.37.57:6379
slots: (0 slots) slave
replicates 5f0b0e17db7e32a59ba719c80c96072355ffa9ec
M: a4e1678a089fd5c0c6ef6dff10cb3db9c66db07b 192.168.37.7:6379
slots:[0-5460] (5461 slots) master
1 additional replica(s)
S: bca894d6f69a96d223f5ede02cd0f96f93e189e2 192.168.37.37:6379
slots: (0 slots) slave
replicates fd16b75cdf1a05843406247587414d32c52dbb91
S: f033ca316f8f083eeafc6ecf81dee6b943f6c25e 192.168.37.47:6379
slots: (0 slots) slave
replicates a4e1678a089fd5c0c6ef6dff10cb3db9c66db07b
M: fd16b75cdf1a05843406247587414d32c52dbb91 192.168.37.27:6379
slots:[10923-16383] (5461 slots) master
1 additional replica(s)
M: 5f0b0e17db7e32a59ba719c80c96072355ffa9ec 192.168.37.17:6379
slots:[5461-10922] (5462 slots) master
1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
How many slots do you want to move (from 1 to 16384)? 4096 #16384/4
What is the receiving node ID? ab666139a5979c029f7255a29cc0049e39b87af9 #接受槽位ID
Please enter all the source node IDs.
Type 'all' to use all the nodes as source nodes for the hash slots.
Type 'done' once you entered all the source nodes IDs.
Source node #1: all #将哪些源主机的槽位分配给 192.168.37.57:6380, all 是自动在所有的 redis node 选择划分,如果是从 redis cluster 删除主机可以使用此方式将主机上的槽位全部移动到别的 redis 主机
Do you want to proceed with the proposed reshard plan (yes/no)? yes #确认分配
#新添加节点,不分配槽位,把它变为slave
redis-cli -h 192.168.37.57 -p 6380
cluster nodes #找到目标master id
cluster replicate a4e1678a089fd5c0c6ef6dff10cb3db9c66db07b
#被迁移 Redis 服务器必须保证没有数据和槽位
#master需要把槽位释放出来
redis-cli -a 123456 --cluster reshard 192.168.37.57:6380
How many slots do you want to move (from 1 to 16384)? 4096 #当前节点槽位数量
What is the receiving node ID? a4e1678a089fd5c0c6ef6dff10cb3db9c66db07b #接受ID
Source node #1: ab666139a5979c029f7255a29cc0049e39b87af9 #分配的ID
Source node #2: done
Do you want to proceed with the proposed reshard plan (yes/no)? yes
#删除节点
redis-cli -a 123456 --cluster del-node 192.168.37.57:6380 ab666139a5979c029f7255a29cc0049e39b87af9
#停止master并验证故障转移
systemctl stop redis
redis-cli -a 123456 --cluster check 192.168.37.57:6379
Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe.
Could not connect to Redis at 192.168.37.7:6379: Connection refused
192.168.37.27:6379 (fd16b75c...) -> 0 keys | 5461 slots | 1 slaves.
192.168.37.17:6379 (5f0b0e17...) -> 0 keys | 5461 slots | 1 slaves.
192.168.37.47:6379 (f033ca31...) -> 1 keys | 5462 slots | 0 slaves.
[OK] 1 keys in 3 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.37.57:6379)
S: f18f4b7f4e52a0d624f4c4d9c54e33ef45c04d35 192.168.37.57:6379
slots: (0 slots) slave
replicates 5f0b0e17db7e32a59ba719c80c96072355ffa9ec
M: fd16b75cdf1a05843406247587414d32c52dbb91 192.168.37.27:6379
slots:[0-2729],[13653-16383] (5461 slots) master
1 additional replica(s)
M: 5f0b0e17db7e32a59ba719c80c96072355ffa9ec 192.168.37.17:6379
slots:[5462-6826],[8192-12287] (5461 slots) master
1 additional replica(s)
M: f033ca316f8f083eeafc6ecf81dee6b943f6c25e 192.168.37.47:6379 #7已下线,47接管当master
slots:[2730-5461],[6827-8191],[12288-13652] (5462 slots) master
S: bca894d6f69a96d223f5ede02cd0f96f93e189e2 192.168.37.37:6379
slots: (0 slots) slave
replicates fd16b75cdf1a05843406247587414d32c52dbb91
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
#验证数据写入
redis-cli -h 192.168.37.47 -p 6379
192.168.37.47:6379> auth 123456
OK
192.168.37.47:6379> keys *
1) "name1"
192.168.37.47:6379> set name2 wang
(error) MOVED 742 192.168.37.27:6379
192.168.37.47:6379> set name3 wang
OK
192.168.37.47:6379> set name4 wang
(error) MOVED 8736 192.168.37.17:6379
192.168.37.47:6379> set name5 wang
OK
192.168.37.47:6379> set name7 wang
OK
192.168.37.47:6379> set name8 wang
(error) MOVED 9132 192.168.37.17:6379
#数据备份
cp /apps/redis/snapshot/dump.rdb /backup/redis_dump.rdb
5.单机memcache及双机repcached(已被redis替代)
#单机memcached
yum install memcached -y
vim /etc/sysconfig/memcached
PORT="11211" #监听端口
USER="memcached" #启动用户
MAXCONN="1024" #最大连接数
CACHESIZE="1024" #最大使用内存
OPTIONS="" #其他选项
systemctl start memcached
#python操作memcache
yum install python-memcached -y
vim use_memcache.py #内容为 python 脚本,应保存为 .py 文件
#!/usr/bin/env python
import memcache
m = memcache.Client(['192.168.37.7:11211'], debug=True)
for i in range(100):
m.set("key%d" % i,"v%d" % i)
ret = m.get('key%d' % i)
print ret
[root@ha1 ~]# telnet 192.168.37.7 11211
Trying 192.168.37.7...
Connected to 192.168.37.7.
Escape character is '^]'.
keys
ERROR
keys *
ERROR
get name
END
ERROR
set name 0 0 4
jack
STORED
get name
VALUE name 0 4
jack
END
quit
Connection closed by foreign host.
#高可用memcached(基本没人用了)
1、编译安装repcached
#memcached1
yum install libevent libevent-devel
wget https://sourceforge.net/projects/repcached/files/repcached/2.2.1-1.2.8/memcached-1.2.8-repcached-2.2.1.tar.gz
tar xf memcached-1.2.8-repcached-2.2.1.tar.gz -C /usr/local/src/
cd /usr/local/src/memcached-1.2.8-repcached-2.2.1/
vim memcached.c
#删除57、60行,删除后如下
56 #ifndef IOV_MAX
57 # define IOV_MAX 1024
58 #endif
./configure --prefix=/apps/repcached --enable-replication
make && make install
cd ..
scp -r memcached-1.2.8-repcached-2.2.1/ 192.168.37.17:/usr/local/src
#memcache2
cd /usr/local/src/memcached-1.2.8-repcached-2.2.1/
./configure --prefix=/apps/repcached --enable-replication
make && make install
2、启动memcached
#memcache-1
# -d:后台 -m:内存 -p:端口 -u:运行用户 -c:并发数 -x:对端memcached的IP -X:本地监听端口
/apps/repcached/bin/memcached -d -m 2048 -p 11211 -u root -c 2048 -x 192.168.37.17 -X 16000
#memcache-2
/apps/repcached/bin/memcached -d -m 2048 -p 11211 -u root -c 2048 -x 192.168.37.7 -X 16000
网友评论