Add the :nearest_slave role for Sentinel mode
This causes the client to measure round-trip latency to each slave
and select the slave with the lowest latency. The intent is to
support sentinel-managed clusters of servers for which eventually-consistent
reads are acceptable, while keeping the latency between any
individual client-slave pair to a minimum.

The case I did this for is shared web application caching across multiple
datacenters, where you would not want Redis to connect to a slave in another
datacenter, but you would want all datacenters to share a cache.
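
For reference, a minimal usage sketch of the new role (the sentinel address, master name, and key are illustrative; the :sentinels and :role options follow the test added below):

    require "redis"

    sentinels = [{ :host => "127.0.0.1", :port => 26379 }]

    # :role => :nearest_slave makes the client ping each healthy slave of the
    # monitored master and connect to the one with the lowest round-trip time.
    redis = Redis.new(:url => "redis://mymaster",
                      :sentinels => sentinels,
                      :role => :nearest_slave)

    redis.get("some-cached-key") # reads are served by the nearest slave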

Remove trailing comma from client creation options; should fix 1.8 builds

If we can't get the role, use a translated role

Ensure that ping test clients are always disconnected after use. Don't assume that a good slave was found.
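
The "translated role" mentioned above refers to the EXPECTED_ROLES map added in the diff below: a client configured with :nearest_slave still expects the server's ROLE command to report "slave". A quick illustration of the fetch fallback (values taken from the diff):

    EXPECTED_ROLES = { "nearest_slave" => "slave" }

    EXPECTED_ROLES.fetch("nearest_slave", "nearest_slave") # => "slave"
    EXPECTED_ROLES.fetch("slave", "slave")                 # => "slave" (unmapped roles pass through)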
cheald committed Mar 4, 2016
1 parent 00c0f50 commit d1ee530
Showing 2 changed files with 81 additions and 3 deletions.
40 changes: 37 additions & 3 deletions lib/redis/client.rb 100644 → 100755
@@ -483,6 +483,10 @@ def check(client)
end

class Sentinel < Connector
EXPECTED_ROLES = {
"nearest_slave" => "slave"
}

def initialize(options)
super(options)

@@ -502,12 +506,12 @@ def check(client)
role = client.call([:role])[0]
rescue Redis::CommandError
# Assume the test is passed if we can't get a reply from ROLE...
role = @role
role = EXPECTED_ROLES.fetch(@role, @role)
end

if role != @role
if role != EXPECTED_ROLES.fetch(@role, @role)
client.disconnect
raise ConnectionError, "Instance role mismatch. Expected #{@role}, got #{role}."
raise ConnectionError, "Instance role mismatch. Expected #{EXPECTED_ROLES.fetch(@role, @role)}, got #{role}."
end
end

@@ -517,6 +521,8 @@ def resolve
resolve_master
when "slave"
resolve_slave
when "nearest_slave"
resolve_nearest_slave
else
raise ArgumentError, "Unknown instance role #{@role}"
end
@@ -566,6 +572,34 @@ def resolve_slave
end
end
end

def resolve_nearest_slave
sentinel_detect do |client|
if reply = client.call(["sentinel", "slaves", @master])
ok_slaves = reply.map {|r| Hash[*r] }.select {|r| r["master-link-status"] == "ok" }

ok_slaves.each do |slave|
client = Client.new @options.merge(
:host => slave["ip"],
:port => slave["port"],
:reconnect_attempts => 0
)
begin
client.call [:ping]
start = Time.now
client.call [:ping]
slave["response_time"] = (Time.now - start).to_f
ensure
client.disconnect
end
end

slave = ok_slaves.sort_by {|slave| slave["response_time"] }.first
{:host => slave.fetch("ip"), :port => slave.fetch("port")} if slave
end
end
end

end
end
end
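
A note on the latency measurement in resolve_nearest_slave above: each candidate slave is pinged twice and only the second ping is timed, presumably so that connection setup does not skew the comparison, and every probe client is disconnected in an ensure block as described in the commit message. The timed portion, with comments added:

    client.call [:ping]                               # warm-up ping; establishes the connection
    start = Time.now
    client.call [:ping]                               # timed ping; a pure round trip on a live connection
    slave["response_time"] = (Time.now - start).to_f  # lowest value wins the sort_by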
44 changes: 44 additions & 0 deletions test/sentinel_test.rb 100644 → 100755
@@ -252,4 +252,48 @@ def test_sentinel_retries

assert_match(/No sentinels available/, ex.message)
end

def test_sentinel_nearest_slave
sentinels = [{:host => "127.0.0.1", :port => 26381}]

master = { :role => lambda { ["master"] } }
s1 = { :role => lambda { ["slave"] }, :slave_id => lambda { ["1"] }, :ping => lambda { ["OK"] } }
s2 = { :role => lambda { ["slave"] }, :slave_id => lambda { ["2"] }, :ping => lambda { sleep 0.1; ["OK"] } }
s3 = { :role => lambda { ["slave"] }, :slave_id => lambda { ["3"] }, :ping => lambda { sleep 0.2; ["OK"] } }

5.times do
RedisMock.start(master) do |master_port|
RedisMock.start(s1) do |s1_port|
RedisMock.start(s2) do |s2_port|
RedisMock.start(s3) do |s3_port|

sentinel = lambda do |port|
{
:sentinel => lambda do |command, *args|
case command
when "slaves"
[
%W[master-link-status down ip 127.0.0.1 port #{s1_port}],
%W[master-link-status ok ip 127.0.0.1 port #{s2_port}],
%W[master-link-status ok ip 127.0.0.1 port #{s3_port}]
].shuffle
else
["127.0.0.1", port.to_s]
end
end
}
end

RedisMock.start(sentinel.call(master_port)) do |sen_port|
sentinels[0][:port] = sen_port
redis = Redis.new(:url => "redis://master1", :sentinels => sentinels, :role => :nearest_slave)
assert_equal redis.slave_id, ["2"]
end
end
end
end
end
end

end
end
