Spring Integration TCP server: multiple concurrent connections of more than 5

I'm using the following versions of Spring Boot and Spring Integration:
spring.boot.version 2.3.4.RELEASE
spring-integration 5.3.2.RELEASE
My requirement is to create TCP client-server communication, and I'm using Spring Integration for it. The spike works fine for a single exchange between client and server, and also for exactly 5 concurrent client connections.
The moment I increase the concurrent client connections beyond 5 to any arbitrary number, it stops working: the TCP server accepts only 5 connections.
I have used the ThreadAffinityClientConnectionFactory mentioned by @Gary Russell in one of his earlier answers (for similar requirements), but it still doesn't work.
Below is the code I have at the moment.
@Slf4j
@Configuration
@EnableIntegration
@IntegrationComponentScan
public class SocketConfig {
@Value("${socket.host}")
private String clientSocketHost;
@Value("${socket.port}")
private Integer clientSocketPort;
@Bean
public TcpOutboundGateway tcpOutGate(AbstractClientConnectionFactory connectionFactory) {
TcpOutboundGateway gate = new TcpOutboundGateway();
//connectionFactory.setTaskExecutor(taskExecutor());
gate.setConnectionFactory(clientCF());
return gate;
}
@Bean
public TcpInboundGateway tcpInGate(AbstractServerConnectionFactory connectionFactory) {
TcpInboundGateway inGate = new TcpInboundGateway();
inGate.setConnectionFactory(connectionFactory);
inGate.setRequestChannel(fromTcp());
return inGate;
}
@Bean
public MessageChannel fromTcp() {
return new DirectChannel();
}
// Outgoing requests
@Bean
public ThreadAffinityClientConnectionFactory clientCF() {
TcpNetClientConnectionFactory tcpNetClientConnectionFactory = new TcpNetClientConnectionFactory(clientSocketHost, serverCF().getPort());
tcpNetClientConnectionFactory.setSingleUse(true);
ThreadAffinityClientConnectionFactory threadAffinityClientConnectionFactory = new ThreadAffinityClientConnectionFactory(
tcpNetClientConnectionFactory);
// Tested with the below too.
// threadAffinityClientConnectionFactory.setTaskExecutor(taskExecutor());
return threadAffinityClientConnectionFactory;
}
// Incoming requests
@Bean
public AbstractServerConnectionFactory serverCF() {
log.info("Server Connection Factory");
TcpNetServerConnectionFactory tcpNetServerConnectionFactory = new TcpNetServerConnectionFactory(clientSocketPort);
tcpNetServerConnectionFactory.setSerializer(new CustomSerializer());
tcpNetServerConnectionFactory.setDeserializer(new CustomDeserializer());
tcpNetServerConnectionFactory.setSingleUse(true);
return tcpNetServerConnectionFactory;
}
@Bean
public TaskExecutor taskExecutor () {
ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor();
executor.setCorePoolSize(50);
executor.setMaxPoolSize(100);
executor.setQueueCapacity(50);
executor.setAllowCoreThreadTimeOut(true);
executor.setKeepAliveSeconds(120);
return executor;
}
}
Has anyone had the same issue with more than 5 concurrent TCP client connections?
Thanks
Client Code:
@Component
@Slf4j
@RequiredArgsConstructor
public class ScheduledTaskService {
// Timeout in milliseconds
private static final int SOCKET_TIME_OUT = 18000;
private static final int BUFFER_SIZE = 32000;
private static final int ETX = 0x03;
private static final String HEADER = "ABCDEF ";
private static final String data = "FIXED DARATA";
private final AtomicInteger atomicInteger = new AtomicInteger();
@Async
@Scheduled(fixedDelay = 100000)
public void sendDataMessage() throws IOException, InterruptedException {
int numberOfRequests = 10;
Callable<String> executeMultipleSuccessfulRequestTask = () -> socketSendNReceive();
final Collection<Callable<String>> callables = new ArrayList<>();
IntStream.rangeClosed(1, numberOfRequests).forEach(i-> {
callables.add(executeMultipleSuccessfulRequestTask);
});
ExecutorService executorService = Executors.newFixedThreadPool(numberOfRequests);
List<Future<String>> taskFutureList = executorService.invokeAll(callables);
List<String> strings = taskFutureList.stream().map(future -> {
try {
return future.get(20000, TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
e.printStackTrace();
} catch (ExecutionException e) {
e.printStackTrace();
} catch (TimeoutException e) {
e.printStackTrace();
}
return "";
}).collect(Collectors.toList());
strings.forEach(string -> log.info("Message received from the server: {} ", string));
executorService.shutdown(); // avoid leaking a new thread pool on every scheduled run
}
public String socketSendNReceive() throws IOException{
int requestCounter = atomicInteger.incrementAndGet();
String host = "localhost";
int port = 8000;
Socket socket = new Socket();
InetSocketAddress address = new InetSocketAddress(host, port);
socket.connect(address, SOCKET_TIME_OUT);
socket.setSoTimeout(SOCKET_TIME_OUT);
//Send the message to the server
OutputStream os = socket.getOutputStream();
BufferedOutputStream bos = new BufferedOutputStream(os);
bos.write(HEADER.getBytes());
bos.write(data.getBytes());
bos.write(ETX);
bos.flush();
// log.info("Message sent to the server : {} ", envio);
//Get the return message from the server
InputStream is = socket.getInputStream();
String response = receber(is);
log.info("Received response");
socket.close(); // single-use exchange; close our end once the reply is read
return response;
}
private String receber(InputStream in) throws IOException {
final StringBuffer stringBuffer = new StringBuffer();
int readLength;
byte[] buffer;
buffer = new byte[BUFFER_SIZE];
do {
if(Objects.nonNull(in)) {
log.info("Input Stream not null");
}
readLength = in.read(buffer);
log.info("readLength : {} ", readLength);
if(readLength > 0){
stringBuffer.append(new String(buffer),0,readLength);
log.info("String ******");
}
} while (readLength > 0 && buffer[readLength - 1] != ETX); // stop on EOF as well as on ETX
buffer = null;
stringBuffer.deleteCharAt(stringBuffer.length() - 1); // strip the trailing ETX
return stringBuffer.toString();
}
}

Since you are opening the connections all at the same time, you need to increase the backlog property on the server connection factory.
It defaults to 5.
/**
* The number of sockets in the connection backlog. Default 5;
* increase if you expect high connection rates.
* @param backlog The backlog to set.
*/
public void setBacklog(int backlog) {
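For example, applied to the serverCF() bean from the question (a sketch; 50 is an arbitrary value, size it to the burst of simultaneous connects you expect):
@Bean
public AbstractServerConnectionFactory serverCF() {
TcpNetServerConnectionFactory tcpNetServerConnectionFactory = new TcpNetServerConnectionFactory(clientSocketPort);
tcpNetServerConnectionFactory.setSerializer(new CustomSerializer());
tcpNetServerConnectionFactory.setDeserializer(new CustomDeserializer());
tcpNetServerConnectionFactory.setSingleUse(true);
tcpNetServerConnectionFactory.setBacklog(50); // default is 5
return tcpNetServerConnectionFactory;
}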

Related

Spring Boot WebSocket URL Not Responding and RxJS Call Repetition?

I'm trying to follow a guide to WebSockets at https://www.devglan.com/spring-boot/spring-boot-angular-websocket
I'd like it to respond to ws://localhost:8448/wsb/softlayer-cost-file, but I'm sure I misunderstood something. I'd like to get it to receive a binary file and issue periodic updates as the file is being processed.
Questions are:
How come Spring does not respond to my requests, despite all the multiple URLs I try (see below)?
Does my RxJS call run once and then conclude, or does it keep running until some closure has happened? Sorry to ask what might be obvious to others.
On Spring Boot server startup, I see no errors. After about 5-7 minutes of running, I saw the following log message:
INFO o.s.w.s.c.WebSocketMessageBrokerStats - WebSocketSession[0 current WS(0)-HttpStream(0)-HttpPoll(0), 0 total, 0 closed abnormally (0 connect failure, 0 send limit, 0 transport error)], stompSubProtocol[processed CONNECT(0)-CONNECTED(0)-DISCONNECT(0)], stompBrokerRelay[null], inboundChannel[pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 0], outboundChannel[pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 0], sockJsScheduler[pool size = 6, active threads = 1, queued tasks = 0, completed tasks = 5]
I've pointed my browser at these URLs and can't get the Spring Boot server to show any reaction:
ws://localhost:8448/app/message
ws://localhost:8448/greeting/app/message
ws://localhost:8448/topic
ws://localhost:8448/queue
(I got the initial request formed in Firefox, then clicked edit/resend to try again).
WebSocketConfig.java
@Configuration
@EnableWebSocketMessageBroker
public class WebSocketConfig extends AbstractWebSocketMessageBrokerConfigurer {
@Autowired
CostFileUploadWebSocketHandler costFileUploadWebSocketHandler;
public void registerWebSocketHandlers(WebSocketHandlerRegistry registry) {
registry.addHandler(new SocketTextHandler(), "/wst");
registry.addHandler(costFileUploadWebSocketHandler, "/wsb/softlayer-cost-file");
}
@Override
public void configureMessageBroker(MessageBrokerRegistry config) {
config.enableSimpleBroker("/topic/", "/queue/");
config.setApplicationDestinationPrefixes("/app");
}
@Override
public void registerStompEndpoints(StompEndpointRegistry registry) {
registry.addEndpoint("/greeting").setAllowedOrigins("*");
// .withSockJS();
}
}
CostFileUploadWebSocketHandler.java
@Component
public class CostFileUploadWebSocketHandler extends BinaryWebSocketHandler {
private final Logger logger = LoggerFactory.getLogger(this.getClass());
private SoftLayerJobService softLayerJobService;
private SoftLayerService softLayerService;
private AuthenticationFacade authenticationFacade;
@Autowired
CostFileUploadWebSocketHandler(SoftLayerJobService softLayerJobService, SoftLayerService softLayerService,
AuthenticationFacade authenticationFacade) {
this.softLayerJobService = softLayerJobService;
this.softLayerService = softLayerService;
this.authenticationFacade = authenticationFacade;
}
Map<WebSocketSession, FileUploadInFlight> sessionToFileMap = new WeakHashMap<>();
@Override
public boolean supportsPartialMessages() {
return true;
}
class WebSocketProgressReporter implements ProgressReporter {
private WebSocketSession session;
public WebSocketProgressReporter(WebSocketSession session) {
this.session = session;
}
@Override
public void reportCurrentProgress(BatchStatus currentBatchStatus, long currentPercentage) {
try {
session.sendMessage(new TextMessage("BatchStatus "+currentBatchStatus));
session.sendMessage(new TextMessage("Percentage Complete "+currentPercentage));
} catch(IOException e) {
throw new RuntimeException(e);
}
}
}
@Override
protected void handleBinaryMessage(WebSocketSession session, BinaryMessage message) throws Exception {
ByteBuffer payload = message.getPayload();
FileUploadInFlight inflightUpload = sessionToFileMap.get(session);
if (inflightUpload == null) {
throw new IllegalStateException("This is not expected");
}
inflightUpload.append(payload);
if (message.isLast()) {
File fileNameSaved = save(inflightUpload.name, "websocket", inflightUpload.bos.toByteArray());
BatchStatus currentBatchStatus = BatchStatus.UNKNOWN;
long percentageComplete;
ProgressReporter progressReporter = new WebSocketProgressReporter(session);
SoftLayerCostFileJobExecutionThread softLayerCostFileJobExecutionThread =
new SoftLayerCostFileJobExecutionThread(softLayerService, softLayerJobService, fileNameSaved,progressReporter);
logger.info("In main thread about to begin separate thread");
ForkJoinPool.commonPool().submit(softLayerCostFileJobExecutionThread);
while(!softLayerCostFileJobExecutionThread.jobDone()); // busy-wait (spins a core) until the job thread reports done
// softLayerCostFileJobExecutionThread.run();
// Wait for above to complete somehow
// StepExecution foundStepExecution = jobExplorer.getJobExecution(
// jobExecutionThread.getJobExecutionResult().getJobExecution().getId()
// ).getStepExecutions().stream().filter(stepExecution->stepExecution.getStepName().equals("softlayerUploadFile")).findFirst().orElseGet(null);
// if (!"COMPLETED".equals(jobExecutionResult.getExitStatus())) {
// throw new UploadFileException(file.getOriginalFilename() + " exit status: " + jobExecutionResult.getExitStatus());
// }
logger.info("In main thread after separate thread submitted");
session.sendMessage(new TextMessage("UPLOAD "+inflightUpload.name));
session.close();
sessionToFileMap.remove(session);
logger.info("Uploaded "+inflightUpload.name);
}
String response = "Upload Chunk: size "+ payload.array().length;
logger.debug(response);
}
private File save(String fileName, String prefix, byte[] data) throws IOException {
Path basePath = Paths.get(".", "uploads", prefix, UUID.randomUUID().toString());
logger.info("Saving incoming cost file "+fileName+" to "+basePath);
Files.createDirectories(basePath);
FileChannel channel = new FileOutputStream(Paths.get(basePath.toString(), fileName).toFile(), false).getChannel();
channel.write(ByteBuffer.wrap(data));
channel.close();
return new File(basePath.getFileName().toString());
}
@Override
public void afterConnectionEstablished(WebSocketSession session) throws Exception {
sessionToFileMap.put(session, new FileUploadInFlight(session));
}
static class FileUploadInFlight {
private final Logger logger = LoggerFactory.getLogger(this.getClass());
String name;
String uniqueUploadId;
ByteArrayOutputStream bos = new ByteArrayOutputStream();
/**
* Fragile constructor - beware not prod ready
* @param session
*/
FileUploadInFlight(WebSocketSession session) {
String query = session.getUri().getQuery();
String uploadSessionIdBase64 = query.split("=")[1];
String uploadSessionId = new String(Base64Utils.decodeUrlSafe(uploadSessionIdBase64.getBytes()));
List<String> sessionIdentifiers = Splitter.on("\\").splitToList(uploadSessionId);
String uniqueUploadId = session.getRemoteAddress().toString()+sessionIdentifiers.get(0);
String fileName = sessionIdentifiers.get(1);
this.name = fileName;
this.uniqueUploadId = uniqueUploadId;
logger.info("Preparing upload for "+this.name+" uploadSessionId "+uploadSessionId);
}
public void append(ByteBuffer byteBuffer) throws IOException{
bos.write(byteBuffer.array());
}
}
}
Below is a snippet of the Angular code where I make the call to the WebSocket. The service is intended to receive a file, then provide regular updates of percentage complete until processing finishes. Does this call need to be in a loop, or does the socket run until it's closed?
Angular Snippet of call to WebSocket:
this.softlayerService.uploadBlueReportFile(this.blueReportFile)
.subscribe(data => {
this.showLoaderBlueReport = false;
this.successBlueReport = true;
this.blueReportFileName = "No file selected";
this.responseBlueReport = 'File '.concat(data.fileName).concat(' ').concat('is ').concat(data.exitStatus);
this.blueReportSelected = false;
this.getCurrentUserFiles();
},
(error)=>{
if(error.status === 504){
this.showLoaderBlueReport = false;
this.stillProcessing = true;
}else{
this.showLoaderBlueReport = false;
this.displayUploadBlueReportsError(error, 'File upload failed');
}
});
}

AggregatingReplyingKafkaTemplate releaseStrategy Question

There seems to be an issue when I use AggregatingReplyingKafkaTemplate with template.setReturnPartialOnTimeout(true): it returns a timeout exception even if partial results are available from consumers.
In the example below, I have 3 consumers replying to the request topic and I've set the reply timeout at 10 seconds. I've explicitly delayed the response of Consumer 3 to 11 seconds; I expect the responses back from Consumers 1 and 2, so I can return partial results. However, I am getting KafkaReplyTimeoutException. Appreciate your inputs. Thanks.
I based the code on the unit test below.
[ReplyingKafkaTemplateTests][1]
I've provided the actual code below:
@RestController
public class SumController {
#Value("${kafka.bootstrap-servers}")
private String bootstrapServers;
public static final String D_REPLY = "dReply";
public static final String D_REQUEST = "dRequest";
@ResponseBody
@PostMapping(value="/sum")
public String sum(@RequestParam("message") String message) throws InterruptedException, ExecutionException {
AggregatingReplyingKafkaTemplate<Integer, String, String> template = aggregatingTemplate(
new TopicPartitionOffset(D_REPLY, 0), 3, new AtomicInteger());
String resultValue ="";
String currentValue ="";
try {
template.setDefaultReplyTimeout(Duration.ofSeconds(10));
template.setReturnPartialOnTimeout(true);
ProducerRecord<Integer, String> record = new ProducerRecord<>(D_REQUEST, null, null, null, message);
RequestReplyFuture<Integer, String, Collection<ConsumerRecord<Integer, String>>> future =
template.sendAndReceive(record);
future.getSendFuture().get(5, TimeUnit.SECONDS); // send ok
System.out.println("Send Completed Successfully");
ConsumerRecord<Integer, Collection<ConsumerRecord<Integer, String>>> consumerRecord = future.get(10, TimeUnit.SECONDS);
System.out.println("Consumer record size "+consumerRecord.value().size());
Iterator<ConsumerRecord<Integer, String>> iterator = consumerRecord.value().iterator();
while (iterator.hasNext()) {
currentValue = iterator.next().value();
System.out.println("response " + currentValue);
System.out.println("Record header " + consumerRecord.headers().toString());
resultValue = resultValue + currentValue + "\r\n";
}
} catch (Exception e) {
System.out.println("Error Message is "+e.getMessage());
}
return resultValue;
}
public AggregatingReplyingKafkaTemplate<Integer, String, String> aggregatingTemplate(
TopicPartitionOffset topic, int releaseSize, AtomicInteger releaseCount) {
//Create Container Properties
ContainerProperties containerProperties = new ContainerProperties(topic);
containerProperties.setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
//Set the consumer Config
//Create Consumer Factory with Consumer Config
DefaultKafkaConsumerFactory<Integer, Collection<ConsumerRecord<Integer, String>>> cf =
new DefaultKafkaConsumerFactory<>(consumerConfigs());
//Create Listener Container with Consumer Factory and Container Property
KafkaMessageListenerContainer<Integer, Collection<ConsumerRecord<Integer, String>>> container =
new KafkaMessageListenerContainer<>(cf, containerProperties);
// container.setBeanName(this.testName);
AggregatingReplyingKafkaTemplate<Integer, String, String> template =
new AggregatingReplyingKafkaTemplate<>(new DefaultKafkaProducerFactory<>(producerConfigs()), container,
(list, timeout) -> {
releaseCount.incrementAndGet();
return list.size() == releaseSize;
});
template.setSharedReplyTopic(true);
template.start();
return template;
}
public Map<String, Object> consumerConfigs() {
Map<String, Object> props = new HashMap<>();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,bootstrapServers);
props.put(ConsumerConfig.GROUP_ID_CONFIG, "test_id");
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, org.apache.kafka.common.serialization.StringDeserializer.class);
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, org.apache.kafka.common.serialization.StringDeserializer.class);
return props;
}
public Map<String, Object> producerConfigs() {
Map<String, Object> props = new HashMap<>();
// list of host:port pairs used for establishing the initial connections to the Kafka cluster
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,
bootstrapServers);
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
org.apache.kafka.common.serialization.StringSerializer.class);
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, org.apache.kafka.common.serialization.StringSerializer.class);
return props;
}
public ProducerFactory<Integer,String> producerFactory() {
return new DefaultKafkaProducerFactory<>(producerConfigs());
}
#KafkaListener(id = "def1", topics = { D_REQUEST}, groupId = "D_REQUEST1")
#SendTo // default REPLY_TOPIC header
public String dListener1(String in) throws InterruptedException {
return "First Consumer : "+ in.toUpperCase();
}
#KafkaListener(id = "def2", topics = { D_REQUEST}, groupId = "D_REQUEST2")
#SendTo // default REPLY_TOPIC header
public String dListener2(String in) throws InterruptedException {
return "Second Consumer : "+ in.toLowerCase();
}
#KafkaListener(id = "def3", topics = { D_REQUEST}, groupId = "D_REQUEST3")
#SendTo // default REPLY_TOPIC header
public String dListener3(String in) throws InterruptedException {
Thread.sleep(11000);
return "Third Consumer : "+ in;
}
}
[1]: https://github.com/spring-projects/spring-kafka/blob/master/spring-kafka/src/test/java/org/springframework/kafka/requestreply/ReplyingKafkaTemplateTests.java
template.setReturnPartialOnTimeout(true) simply means the template will consult the release strategy on timeout (with the timeout argument = true, to tell the strategy it's a timeout rather than a delivery call).
It must return true to release the partial result.
This is to allow you to look at (and possibly modify) the list to decide whether you want to release or discard.
Your strategy ignores the timeout parameter:
(list, timeout) -> {
releaseCount.incrementAndGet();
return list.size() == releaseSize;
});
You need to return true when the timeout argument is true, so the partial result is released.
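Applied to the strategy above, that looks like this (same release counter, but releasing on timeout as well as when the full set of 3 replies arrives):
(list, timeout) -> {
releaseCount.incrementAndGet();
return timeout || list.size() == releaseSize;
});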

How to configure a Lettuce Redis cluster async connection pool

I'm configuring my Lettuce Redis cluster pool. When I configure it according to the official documentation, the connection pool cannot be initialized properly and I can't get a connection. The official docs state:
RedisClusterClient clusterClient =
RedisClusterClient.create(RedisURI.create(host, port));
AsyncPool<StatefulRedisConnection<String, String>> pool = AsyncConnectionPoolSupport.createBoundedObjectPool( () -> clusterClient.connectAsync(StringCodec.UTF8), BoundedPoolConfig.create());
// execute work
CompletableFuture<String> setResult = pool.acquire().thenCompose(connection -> {
RedisAsyncCommands<String, String> async = connection.async();
async.set("key", "value");
return async.set("key2", "value2").whenComplete((s, throwable) -> pool.release(connection));
});
// terminating
pool.closeAsync();
// after pool completion
clusterClient.shutdownAsync();
This config did not work in my environment, so I added a minIdle config:
final BoundedPoolConfig.Builder builder = BoundedPoolConfig.builder();
builder.minIdle(9);
It worked at the beginning, but when I loop over the connection pool and send commands multiple times, the following exception is thrown:
java.util.concurrent.ExecutionException: java.lang.IllegalStateException: AsyncPool is closed
at java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:357)
at java.util.concurrent.CompletableFuture.get(CompletableFuture.java:1895)
Here is all my code:
private String passwd = "xxxxx";
private String ip = "10.0.0.204";
@Bean
@Scope("singleton")
public ClientResources clientResources() {
final DefaultClientResources defaultClientResources = DefaultClientResources.builder()
.ioThreadPoolSize(4)
.computationThreadPoolSize(4)
.build();
return defaultClientResources;
}
@Bean(destroyMethod = "shutdown")
@Scope("singleton")
public RedisClusterClient clusterClient(ClientResources clientResources) {
final String ip = "10.0.0.204";
final String passwd = "dingXiang123";
final RedisURI redisURI1 = RedisURI.Builder.redis(ip, 7001).withPassword(passwd).build();
final RedisURI redisURI2 = RedisURI.Builder.redis(ip, 7002).withPassword(passwd).build();
final RedisURI redisURI3 = RedisURI.Builder.redis(ip, 7003).withPassword(passwd).build();
final RedisURI redisURI4 = RedisURI.Builder.redis(ip, 7004).withPassword(passwd).build();
final RedisURI redisURI5 = RedisURI.Builder.redis(ip, 7005).withPassword(passwd).build();
final RedisURI redisURI6 = RedisURI.Builder.redis(ip, 7006).withPassword(passwd).build();
RedisClusterClient clusterClient = null;
try {
final List<RedisURI> redisURIS = Arrays.asList(redisURI1, redisURI2, redisURI3, redisURI4, redisURI5, redisURI6);
clusterClient = RedisClusterClient.create(clientResources, redisURIS);
ClusterTopologyRefreshOptions topologyRefreshOptions = ClusterTopologyRefreshOptions.builder()
.enableAdaptiveRefreshTrigger(ClusterTopologyRefreshOptions.RefreshTrigger.MOVED_REDIRECT, ClusterTopologyRefreshOptions.RefreshTrigger.PERSISTENT_RECONNECTS)
// connection pool refresh timeout
.adaptiveRefreshTriggersTimeout(Duration.ofMinutes(3))
.build();
clusterClient.setOptions(ClusterClientOptions.builder()
.topologyRefreshOptions(topologyRefreshOptions)
.autoReconnect(true)
.pingBeforeActivateConnection(true)
.build());
final RedisAdvancedClusterAsyncCommands<String, String> async = clusterClient.connect().async();
final RedisFuture<String> set = async.set("aa", "aaaaa");
set.get();
log.info("客户端初始化成功");
return clusterClient;
} catch (Exception e) {
log.error("lettce客户端初始化失败,{}", e);
if (clusterClient != null) {
clusterClient.shutdown();
}
}
return null;
}
/**
* Initialize the async cluster-mode connection pool.
*
* @param clusterClient
* @return
*/
@Bean()
@DependsOn("clusterClient")
@Scope("singleton")
public BoundedAsyncPool<StatefulRedisClusterConnection<String, String>> lettucePool(RedisClusterClient clusterClient) {
final BoundedPoolConfig.Builder builder = BoundedPoolConfig.builder();
builder.minIdle(9);
final BoundedPoolConfig boundedPoolConfig = builder.build();
final BoundedAsyncPool<StatefulRedisClusterConnection<String, String>> lettucePool = AsyncConnectionPoolSupport.createBoundedObjectPool(
() -> clusterClient.connectAsync(StringCodec.UTF8)
, boundedPoolConfig
);
log.info("连接池初始化成功");
return lettucePool;
}
/**
* Acquire a connection from the pool.
*
* @param lettucePool
*/
@Bean
@DependsOn("lettucePool")
public CompletableFuture<StatefulRedisClusterConnection<String, String>> clusterAsync(BoundedAsyncPool<StatefulRedisClusterConnection<String, String>> lettucePool) {
final CompletableFuture<StatefulRedisClusterConnection<String, String>> acquire = lettucePool.acquire();
return acquire;
}
Has anyone encountered this problem, and how did you solve it?
The other point is that I don't really like using RedisTemplate to drive the Lettuce API, so I am looking for a configuration approach for the native Lettuce cluster pool.
If you have configured the native cluster pool or used its API before, or have seen a detailed demo, please recommend it to me (of course, I have also read the official documentation; I may need a demo application to study).
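One thing that stands out in the code above: the clusterAsync bean exposes the result of a single lettucePool.acquire() for the application's whole lifetime, so every caller shares one connection acquired at startup and nothing ever releases it back to the pool. A per-call acquire/use/release sketch against the lettucePool bean (a hypothetical helper following the acquire/release pattern from the docs snippet quoted above, not code from the question):
public CompletableFuture<String> setAsync(BoundedAsyncPool<StatefulRedisClusterConnection<String, String>> lettucePool, String key, String value) {
return lettucePool.acquire().thenCompose(connection ->
connection.async().set(key, value)
.toCompletableFuture()
// always hand the connection back, whether the command succeeded or failed
.whenComplete((ok, throwable) -> lettucePool.release(connection)));
}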

Spark-Streaming CustomReceiver Unknown Host Exception

I am new to Spark Streaming. I want to stream a URL online in order to retrieve info from it, so I used the JavaCustomReceiver to stream the URL.
This is the code I'm using (source):
public class JavaCustomReceiver extends Receiver<String> {
private static final Pattern SPACE = Pattern.compile(" ");
public static void main(String[] args) throws Exception {
SparkConf sparkConf = new SparkConf().setAppName("JavaCustomReceiver");
JavaStreamingContext ssc = new JavaStreamingContext(sparkConf, new Duration(1000));
JavaReceiverInputDStream<String> lines = ssc.receiverStream(
new JavaCustomReceiver("http://stream.meetup.com/2/rsvps", 80));
JavaDStream<String> words = lines.flatMap(new
FlatMapFunction<String, String>() {
@Override
public Iterator<String> call(String x) {
return Arrays.asList(SPACE.split(x)).iterator();
}
});
JavaPairDStream<String, Integer> wordCounts = words.mapToPair(
new PairFunction<String, String, Integer>() {
@Override
public Tuple2<String, Integer> call(String s) {
return new Tuple2<>(s, 1);
}
}).reduceByKey(new Function2<Integer, Integer, Integer>() {
@Override
public Integer call(Integer i1, Integer i2) {
return i1 + i2;
}
});
wordCounts.print();
ssc.start();
ssc.awaitTermination();
}
String host = null;
int port = -1;
public JavaCustomReceiver(String host_, int port_) {
super(StorageLevel.MEMORY_AND_DISK_2());
host = host_;
port = port_;
}
public void onStart() {
new Thread() {
@Override
public void run() {
receive();
}
}.start();
}
public void onStop() {
}
private void receive() {
try {
Socket socket = null;
BufferedReader reader = null;
String userInput = null;
try {
// connect to the server
socket = new Socket(host, port);
reader = new BufferedReader(
new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
// Until stopped or connection broken continue reading
while (!isStopped() && (userInput = reader.readLine()) != null) {
System.out.println("Received data '" + userInput + "'");
store(userInput);
}
} finally {
Closeables.close(reader, /* swallowIOException = */ true);
Closeables.close(socket, /* swallowIOException = */ true);
}
restart("Trying to connect again");
} catch (ConnectException ce) {
// restart if could not connect to server
restart("Could not connect", ce);
} catch (Throwable t) {
restart("Error receiving data", t);
}
}
}
However, I keep getting a java.net.UnknownHostException.
How can I fix this? What is wrong with the code I'm using?
After reading the code of the referenced custom receiver, it is clear that it is a TCP receiver that connects to a host:port pair, not an HTTP receiver that could take a URL. You'll have to change the code to read from an HTTP endpoint.
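A minimal sketch of what receive() could look like over HTTP, assuming the endpoint streams one record per line (uses java.net.HttpURLConnection; the URL is the one from the question):
private void receive() {
try {
// open an HTTP stream instead of a raw TCP socket
java.net.HttpURLConnection conn =
(java.net.HttpURLConnection) new java.net.URL("http://stream.meetup.com/2/rsvps").openConnection();
BufferedReader reader = new BufferedReader(
new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8));
String line;
// until stopped or the connection breaks, keep reading and storing lines
while (!isStopped() && (line = reader.readLine()) != null) {
store(line);
}
reader.close();
conn.disconnect();
restart("Trying to connect again");
} catch (Throwable t) {
restart("Error receiving data", t);
}
}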

Spring 4.0.5 WebSocket integration with Apollo and the exception "Message broker is not active"

For a project, I want to switch from the simple broker to a full-featured broker (like RabbitMQ or Apollo).
The exception stack:
Caused by: org.springframework.messaging.MessageDeliveryException: Message broker is not active.
at org.springframework.messaging.simp.stomp.StompBrokerRelayMessageHandler.handleMessageInternal(StompBrokerRelayMessageHandler.java:392)
at org.springframework.messaging.simp.broker.AbstractBrokerMessageHandler.handleMessage(AbstractBrokerMessageHandler.java:177)
at org.springframework.messaging.support.ExecutorSubscribableChannel.sendInternal(ExecutorSubscribableChannel.java:64)
at org.springframework.messaging.support.AbstractMessageChannel.send(AbstractMessageChannel.java:116)
at org.springframework.messaging.support.AbstractMessageChannel.send(AbstractMessageChannel.java:98)
at org.springframework.messaging.simp.SimpMessagingTemplate.doSend(SimpMessagingTemplate.java:125)
at org.springframework.messaging.simp.SimpMessagingTemplate.doSend(SimpMessagingTemplate.java:48)
at org.springframework.messaging.core.AbstractMessageSendingTemplate.send(AbstractMessageSendingTemplate.java:94)
at org.springframework.messaging.core.AbstractMessageSendingTemplate.convertAndSend(AbstractMessageSendingTemplate.java:144)
at org.springframework.messaging.core.AbstractMessageSendingTemplate.convertAndSend(AbstractMessageSendingTemplate.java:112)
at org.springframework.messaging.core.AbstractMessageSendingTemplate.convertAndSend(AbstractMessageSendingTemplate.java:107)
at cn.clickmed.cmcp.websocket.plain.util.WebSocketUtil.sendMessage(WebSocketUtil.java:61)
at cn.clickmed.cmcp.websocket.plain.util.WebSocketUtil.sendMessage(WebSocketUtil.java:65)
at cn.clickmed.cmcp.websocket.plain.entity.WebSocketEntity.postPersist(WebSocketEntity.java:26)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
Here is the config code:
@Configuration
@EnableWebSocketMessageBroker
public class WebSocketConfig extends AbstractWebSocketMessageBrokerConfigurer{
@Override
public void registerStompEndpoints(StompEndpointRegistry registry) {
for(String mapping : WebSocketConstant.WEBSOCKETTYPE.keySet()) {
registry.addEndpoint(mapping).withSockJS();
}
}
@Override
public void configureMessageBroker(MessageBrokerRegistry registry) {
registry.setApplicationDestinationPrefixes("/websocket");
// registry.enableSimpleBroker("/topic");
registry.enableStompBrokerRelay("/topic","/queue").setRelayHost("localhost").setRelayPort(61613).setSystemHeartbeatReceiveInterval(2000).setSystemHeartbeatSendInterval(2000);
// registry.setUserDestinationPrefix("/topic/");
}
@Bean
public Broker broker() throws Exception {
final Broker broker = new Broker();
// Configure STOMP over WebSockets connector
final AcceptingConnectorDTO ws = new AcceptingConnectorDTO();
ws.id = "ws";
ws.bind = "ws://localhost:61613";
ws.protocols.add( new StompDTO() );
// Create a topic with name 'test'
final TopicDTO topic = new TopicDTO();
topic.id = "todoListener";
// Create virtual host (based on localhost)
final VirtualHostDTO host = new VirtualHostDTO();
host.id = "localhost";
host.topics.add( topic );
host.host_names.add( "localhost" );
host.host_names.add( "127.0.0.1" );
host.auto_create_destinations = false;
// Create a web admin UI (REST) accessible at: http://localhost:61680/api/index.html#!/
final WebAdminDTO webadmin = new WebAdminDTO();
webadmin.bind = "http://localhost:61680";
// Create JMX instrumentation
final JmxDTO jmxService = new JmxDTO();
jmxService.enabled = true;
// Finally, glue all together inside broker configuration
final BrokerDTO config = new BrokerDTO();
config.connectors.add( ws );
config.virtual_hosts.add( host );
config.web_admins.add( webadmin );
config.services.add( jmxService );
broker.setConfig( config );
broker.setTmp( new File( System.getProperty( "java.io.tmpdir" ) ) );
broker.start( new Runnable() {
@Override
public void run() {
System.out.println("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:启动了");
}
} );
return broker;
}
}
Send message code:
public class WebSocketUtil implements ILog {
public static Map<String, List<WebSocketSession>> webSocketSessionMap = new HashMap<String, List<WebSocketSession>>();
public static SimpMessagingTemplate simpMessagingTemplate = null;
public static void doSendMessage(String webSocketType) throws IOException {
if(WebSocketUtil.webSocketSessionMap.get(webSocketType) != null) {
String msg = "";
if(WebSocketConstant.WEBSOCKETTYPE_TODOLIST.equals(webSocketType)) {
msg = "change";
}
for(WebSocketSession webSocketSession :WebSocketUtil.webSocketSessionMap.get(webSocketType)) {
webSocketSession.sendMessage(new TextMessage(msg));
}
}
}
public static String getWebSocketType(String url) {
int length = url.indexOf("/cmcp/");
String theUrl = url.substring(length+5);
if(theUrl.startsWith(WebSocketConstant.COMPATIBLEMAPPING)) {
String [] arr = theUrl.split("/");
theUrl = "/"+arr[2];
}
if(!WebSocketConstant.WEBSOCKETTYPE.containsKey(theUrl)) {
logger.error("please config the websocketConstant !!!!");
throw new RuntimeException();
}
String theType = WebSocketConstant.WEBSOCKETTYPE.get(theUrl);
return theType;
}
public synchronized static void initSimpMessagingTemplate() {
if(simpMessagingTemplate == null) {
simpMessagingTemplate = SpringUtil.getBeanByName("brokerMessagingTemplate");
}
}
public static void sendMessage(String topic, String message) {
if(simpMessagingTemplate == null) {
initSimpMessagingTemplate();
}
simpMessagingTemplate.convertAndSend("/topic"+topic, message);
}
public static void sendMessage(String topic) {
sendMessage(topic,"change");
}
}
This is the trigger entity:
@MappedSuperclass
public class WebSocketEntity extends CommonEntity {
/**
* serialVersionUID
*/
private static final long serialVersionUID = 1L;
@PostPersist
public void postPersist() throws IOException{
if(this instanceof TodoEntity) {
WebSocketUtil.sendMessage(WebSocketConstant.WEBSOCKETTYPE_TODOLIST_MAPPING);
}
}
@PostRemove
public void postRemove() throws IOException{
if(this instanceof TodoEntity) {
WebSocketUtil.sendMessage(WebSocketConstant.WEBSOCKETTYPE_TODOLIST_MAPPING);
}
}
}
Please give me some help! Thanks!
