Multi node pipeline executor #4070

Open · wants to merge 2 commits into base: master
8 changes: 8 additions & 0 deletions src/main/java/redis/clients/jedis/ClusterPipeline.java
@@ -2,6 +2,8 @@

import java.time.Duration;
import java.util.Set;
import java.util.concurrent.ExecutorService;

import org.apache.commons.pool2.impl.GenericObjectPoolConfig;
import redis.clients.jedis.providers.ClusterConnectionProvider;
import redis.clients.jedis.util.IOUtils;
@@ -40,6 +42,12 @@ public ClusterPipeline(ClusterConnectionProvider provider, ClusterCommandObjects
this.provider = provider;
}

public ClusterPipeline(ClusterConnectionProvider provider, ClusterCommandObjects commandObjects,
ExecutorService executorService) {
super(commandObjects, executorService);
this.provider = provider;
}

private static ClusterCommandObjects createClusterCommandObjects(RedisProtocol protocol) {
ClusterCommandObjects cco = new ClusterCommandObjects();
if (protocol == RedisProtocol.RESP3) cco.setProtocol(protocol);
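For callers that build a ClusterPipeline directly instead of going through JedisCluster, a rough sketch of the new constructor follows. It assumes ClusterConnectionProvider and ClusterCommandObjects can be instantiated from application code; the node address and pool size are placeholders, and most callers would use JedisCluster.pipelined(ExecutorService) instead (see the JedisCluster.java change below).

```java
import java.util.Collections;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import redis.clients.jedis.ClusterCommandObjects;
import redis.clients.jedis.ClusterPipeline;
import redis.clients.jedis.DefaultJedisClientConfig;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.providers.ClusterConnectionProvider;

public class DirectClusterPipelineSketch {
  public static void main(String[] args) {
    Set<HostAndPort> nodes = Collections.singleton(new HostAndPort("127.0.0.1", 7000)); // placeholder node
    ExecutorService executor = Executors.newFixedThreadPool(3); // caller-owned pool, reusable across pipelines

    ClusterConnectionProvider provider =
        new ClusterConnectionProvider(nodes, DefaultJedisClientConfig.builder().build());
    ClusterPipeline pipeline = new ClusterPipeline(provider, new ClusterCommandObjects(), executor);
    try {
      pipeline.set("k1", "v1");
      pipeline.close(); // close() syncs; replies are read on the supplied executor
    } finally {
      provider.close();
      executor.shutdown(); // the pipeline never shuts down a caller-supplied executor
    }
  }
}
```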
5 changes: 5 additions & 0 deletions src/main/java/redis/clients/jedis/JedisCluster.java
@@ -4,6 +4,7 @@
import java.util.Collections;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ExecutorService;

import org.apache.commons.pool2.impl.GenericObjectPoolConfig;

@@ -379,6 +380,10 @@ public ClusterPipeline pipelined() {
return new ClusterPipeline((ClusterConnectionProvider) provider, (ClusterCommandObjects) commandObjects);
}

public ClusterPipeline pipelined(ExecutorService executorService) {
return new ClusterPipeline((ClusterConnectionProvider) provider, (ClusterCommandObjects) commandObjects, executorService);
}

/**
* @param doMulti param
* @return nothing
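A minimal usage sketch of the new pipelined(ExecutorService) overload with a shared, caller-managed pool; the node address and pool size here are placeholders:

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import redis.clients.jedis.ClusterPipeline;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.JedisCluster;

public class SharedExecutorPipelineSketch {
  public static void main(String[] args) {
    // One application-wide pool, reused across many pipeline syncs.
    ExecutorService executor = Executors.newFixedThreadPool(3);
    try (JedisCluster cluster = new JedisCluster(new HostAndPort("127.0.0.1", 7000))) {
      ClusterPipeline pipeline = cluster.pipelined(executor);
      for (int i = 0; i < 100; i++) {
        pipeline.set("key:" + i, Integer.toString(i));
      }
      pipeline.close(); // close() syncs, submitting one reply-reading task per cluster node
    } finally {
      executor.shutdown(); // sync() never shuts down a caller-supplied executor
    }
  }
}
```

Because the executor belongs to the caller, sync() leaves it running, so the same pool can serve many pipelines and be shut down once during application shutdown.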
79 changes: 45 additions & 34 deletions src/main/java/redis/clients/jedis/MultiNodePipelineBase.java
@@ -1,12 +1,12 @@
package redis.clients.jedis;

import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

@@ -31,6 +31,7 @@ public abstract class MultiNodePipelineBase extends PipelineBase {

private final Map<HostAndPort, Queue<Response<?>>> pipelinedResponses;
private final Map<HostAndPort, Connection> connections;
private ExecutorService executorService;
private volatile boolean syncing = false;

public MultiNodePipelineBase(CommandObjects commandObjects) {
@@ -39,6 +40,13 @@ public MultiNodePipelineBase(CommandObjects commandObjects) {
connections = new LinkedHashMap<>();
}

public MultiNodePipelineBase(CommandObjects commandObjects, ExecutorService executorService) {
super(commandObjects);
this.executorService = executorService;
pipelinedResponses = new LinkedHashMap<>();
connections = new LinkedHashMap<>();
}

/**
* Sub-classes must call this method, if graph commands are going to be used.
* @param connectionProvider connection provider
@@ -96,44 +104,47 @@ public final void sync() {
return;
}
syncing = true;

ExecutorService executorService = Executors.newFixedThreadPool(MULTI_NODE_PIPELINE_SYNC_WORKERS);

CountDownLatch countDownLatch = new CountDownLatch(pipelinedResponses.size());
Iterator<Map.Entry<HostAndPort, Queue<Response<?>>>> pipelinedResponsesIterator
= pipelinedResponses.entrySet().iterator();
while (pipelinedResponsesIterator.hasNext()) {
Map.Entry<HostAndPort, Queue<Response<?>>> entry = pipelinedResponsesIterator.next();
HostAndPort nodeKey = entry.getKey();
Queue<Response<?>> queue = entry.getValue();
Connection connection = connections.get(nodeKey);
executorService.submit(() -> {
try {
List<Object> unformatted = connection.getMany(queue.size());
for (Object o : unformatted) {
queue.poll().set(o);
}
} catch (JedisConnectionException jce) {
log.error("Error with connection to " + nodeKey, jce);
// cleanup the connection
pipelinedResponsesIterator.remove();
connections.remove(nodeKey);
IOUtils.closeQuietly(connection);
} finally {
countDownLatch.countDown();
}
});
}

ExecutorService executorService = getExecutorService();
CompletableFuture[] futures
= pipelinedResponses.entrySet().stream()
.map(e -> CompletableFuture.runAsync(() -> closeConnection(e), executorService))
.toArray(CompletableFuture[]::new);
CompletableFuture awaitAllCompleted = CompletableFuture.allOf(futures);
try {
countDownLatch.await();
awaitAllCompleted.get();
if (executorService != this.executorService) {
executorService.shutdown();
}
} catch (ExecutionException e) {
log.error("Failed execution.", e);
} catch (InterruptedException e) {
log.error("Thread is interrupted during sync.", e);
Thread.currentThread().interrupt();
}
syncing = false;
}

executorService.shutdownNow();
private ExecutorService getExecutorService() {
if (executorService == null) {
return Executors.newFixedThreadPool(Math.min(this.pipelinedResponses.size(), MULTI_NODE_PIPELINE_SYNC_WORKERS));
}
return executorService;
}

syncing = false;
private void closeConnection(Map.Entry<HostAndPort, Queue<Response<?>>> entry) {
HostAndPort nodeKey = entry.getKey();
Queue<Response<?>> queue = entry.getValue();
Connection connection = connections.get(nodeKey);
try {
List<Object> unformatted = connection.getMany(queue.size());
for (Object o : unformatted) {
queue.poll().set(o);
}
} catch (JedisConnectionException jce) {
log.error("Error with connection to " + nodeKey, jce);
connections.remove(nodeKey);
IOUtils.closeQuietly(connection);
}
}

@Deprecated
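For context on the concurrency change: sync() now submits one CompletableFuture.runAsync task per node, waits on CompletableFuture.allOf, and shuts the executor down only when it was created internally by getExecutorService(). A standalone sketch of that shape, using hypothetical string node names rather than Jedis types:

```java
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class FanOutSyncSketch {
  public static void main(String[] args) {
    List<String> nodes = Arrays.asList("node-1", "node-2", "node-3"); // stand-ins for cluster nodes
    ExecutorService suppliedExecutor = null; // null models "no executor passed in", as in getExecutorService()

    ExecutorService executor = (suppliedExecutor != null)
        ? suppliedExecutor
        : Executors.newFixedThreadPool(Math.min(nodes.size(), 3));

    // One task per node, mirroring CompletableFuture.runAsync(() -> closeConnection(e), executorService).
    CompletableFuture[] futures = nodes.stream()
        .map(node -> CompletableFuture.runAsync(
            () -> System.out.println("reading replies from " + node), executor))
        .toArray(CompletableFuture[]::new);

    try {
      CompletableFuture.allOf(futures).get(); // block until every per-node task has finished
      if (executor != suppliedExecutor) {
        executor.shutdown(); // only tear down a pool this method created itself
      }
    } catch (ExecutionException e) {
      System.err.println("Failed execution: " + e.getCause());
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
  }
}
```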
24 changes: 24 additions & 0 deletions src/test/java/redis/clients/jedis/ClusterPipeliningTest.java
@@ -6,6 +6,9 @@
import static redis.clients.jedis.Protocol.CLUSTER_HASHSLOTS;

import java.util.*;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadPoolExecutor;

import org.hamcrest.MatcherAssert;
import org.hamcrest.Matchers;
@@ -1081,6 +1084,27 @@ public void transaction() {
}
}

@Test(timeout = 10_000L)
public void pipelineMergingWithExecutorService() {
final int maxTotal = 100;
ConnectionPoolConfig poolConfig = new ConnectionPoolConfig();
poolConfig.setMaxTotal(maxTotal);
ThreadPoolExecutor executorService = (ThreadPoolExecutor) Executors.newFixedThreadPool(10);
try (JedisCluster cluster = new JedisCluster(nodes, DEFAULT_CLIENT_CONFIG, 5, poolConfig)) {
ClusterPipeline pipeline = cluster.pipelined(executorService);
for (int i = 0; i < maxTotal; i++) {
String s = Integer.toString(i);
pipeline.set(s, s);
}
pipeline.close();
// Closing the pipeline syncs it, which submits one reply-reading task per cluster node.
assertEquals(nodes.size(), executorService.getTaskCount());
assertFalse(executorService.isShutdown());
} finally {
executorService.shutdown();
}
}

@Test(timeout = 10_000L)
public void multiple() {
final int maxTotal = 100;