Apache Curator + Spring Boot: Simple Observer pattern example

I am trying to set up a basic project structure where multiple Spring Boot applications share resources using Apache Curator.
I am following the guides in the documentation, but changing the nodes doesn't trigger any events.
Any help would be appreciated.
pom.xml
<dependency>
    <groupId>org.apache.curator</groupId>
    <artifactId>curator-framework</artifactId>
    <version>2.12.0</version>
</dependency>
<dependency>
    <groupId>org.apache.curator</groupId>
    <artifactId>curator-recipes</artifactId>
    <version>2.12.0</version>
</dependency>
docker-compose.yaml
version: '3.1'
services:
  zoo1:
    image: zookeeper
    restart: always
    hostname: zoo1
    ports:
      - 2181:2181
    environment:
      ZOO_MY_ID: 1
      ZOO_SERVERS: server.1=0.0.0.0:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181
  zoo2:
    image: zookeeper
    restart: always
    hostname: zoo2
    ports:
      - 2182:2181
    environment:
      ZOO_MY_ID: 2
      ZOO_SERVERS: server.1=zoo1:2888:3888;2181 server.2=0.0.0.0:2888:3888;2181 server.3=zoo3:2888:3888;2181
  zoo3:
    image: zookeeper
    restart: always
    hostname: zoo3
    ports:
      - 2183:2181
    environment:
      ZOO_MY_ID: 3
      ZOO_SERVERS: server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=0.0.0.0:2888:3888;2181
Creator
package com.training.zoo.sss;

import org.apache.curator.RetryPolicy;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.ExponentialBackoffRetry;
import org.apache.zookeeper.data.Stat;
import org.springframework.stereotype.Service;

import java.nio.charset.StandardCharsets;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

import static java.lang.System.out;

@Service
public class Client {

    String connectionInfo = "127.0.0.1:2181";
    String ZK_PATH = "/someapp/somemodule/someroute";

    public Client() throws Exception {
        RetryPolicy retryPolicy = new ExponentialBackoffRetry(1000, 3);
        CuratorFramework client = CuratorFrameworkFactory.builder()
                .connectString(connectionInfo)
                .sessionTimeoutMs(5000)
                .connectionTimeoutMs(5000)
                .retryPolicy(retryPolicy)
                .namespace("base")
                .build();
        client.start();

        Stat stat1 = client.checkExists().creatingParentContainersIfNeeded().forPath(ZK_PATH);
        if (stat1 == null) {
            client.create().forPath(ZK_PATH, "sometdata".getBytes());
        }

        byte[] bytes = client.getData().forPath(ZK_PATH);
        out.println(new String(bytes, StandardCharsets.UTF_8));

        // Update the value every half second
        final AtomicInteger i = new AtomicInteger(0);
        ScheduledExecutorService exec = Executors.newScheduledThreadPool(1);
        exec.scheduleAtFixedRate(new Runnable() {
            @Override
            public void run() {
                i.incrementAndGet();
                System.out.println(i);
                try {
                    client.setData().forPath(ZK_PATH, ("init_" + i).getBytes());
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
        }, 0, 500, TimeUnit.MILLISECONDS);
    }
}
Listener
package com.training.bookstore.request;

import org.apache.curator.RetryPolicy;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.recipes.cache.ChildData;
import org.apache.curator.framework.recipes.cache.PathChildrenCache;
import org.apache.curator.retry.ExponentialBackoffRetry;
import org.springframework.stereotype.Service;

@Service
public class Watcher2 {

    String connectionInfo = "127.0.0.1:2181";
    String ZK_PATH = "/someapp/somemodule/someroute";

    public Watcher2() throws Exception {
        RetryPolicy retryPolicy = new ExponentialBackoffRetry(1000, 3);
        CuratorFramework client = CuratorFrameworkFactory.builder()
                .connectString(connectionInfo)
                .sessionTimeoutMs(5000)
                .connectionTimeoutMs(5000)
                .retryPolicy(retryPolicy)
                .namespace("base")
                .build();
        client.start();

        PathChildrenCache watcher = new PathChildrenCache(
                client, ZK_PATH, true // true = cache node data
        );
        watcher.getListenable().addListener((client1, event) -> {
            ChildData data = event.getData();
            if (data == null) {
                System.out.println("No data in event[" + event + "]");
            } else {
                System.out.println("Receive event: "
                        + "type=[" + event.getType() + "]"
                        + ", path=[" + data.getPath() + "]"
                        + ", data=[" + new String(data.getData()) + "]"
                        + ", stat=[" + data.getStat() + "]");
            }
        });
        watcher.start(PathChildrenCache.StartMode.NORMAL);
        System.out.println("Registered ZooKeeper watcher successfully!");
    }
}
Thank you

So, yeah, that class name PathChildrenCache sounded a bit off to me, and it turned out to be the key.
The solution:
If the producer writes to a specific path, e.g.
String connectionInfo = "127.0.0.1:2181";
String PATH = "/someapp/somemodule/whatever";
then in the Watcher class set the path to the parent of that node:
String connectionInfo = "127.0.0.1:2181";
String PATH = "/someapp/somemodule";
PathChildrenCache only fires events for the direct children of the path it watches, so watching the produced node itself reports nothing.
And in case you need to listen to subnodes/subfolders of your producer path, use TreeCache instead of PathChildrenCache, as sketched below.
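For reference, a minimal TreeCache sketch (my own addition, assuming the same started CuratorFramework client as in the question; TreeCache reports events for the watched node and its whole subtree):
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.recipes.cache.TreeCache;
import org.apache.curator.framework.recipes.cache.TreeCacheEvent;

// Watch "/someapp/somemodule" and everything below it.
TreeCache cache = new TreeCache(client, "/someapp/somemodule");
cache.getListenable().addListener((CuratorFramework c, TreeCacheEvent event) ->
        System.out.println("type=[" + event.getType() + "], data=[" + event.getData() + "]"));
cache.start();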

Related

Using Spring EL to add optional postfix from properties to consumerGroup in @KafkaListener

I have a simple Spring Boot application with Kafka consumers that look like
@KafkaListener(topics="topic", groupId="SOME_CONSTANT") {
....
}
What I am required to do is add an optional Spring Boot property (from env variables, but that is not important), let's say:
myapp.env: TEST
and when that variable is present I should automatically update the consumer group to be
SOME_CONSTANT-TEST
I am playing with SpEL
@KafkaListener(topics="topic", groupId="#{ '${myApp.env}' == null ? 'SOME_CONSTANT' : 'SOME_CONSTANT' + '-' + '${myApp.env}}'") {
....
}
But that does not seem to work :/ Any ideas?
You can use the T operator to read the constant's value, and the colon ':' default so the placeholder resolves to an empty string when the env variable is absent (a bare ${my.app.env} would fail to resolve):
@KafkaListener(topics="topic", groupId="#{ '${my.app.env:}' == '' ? T(com.mypackage.MyListener).SOME_CONSTANT : T(com.mypackage.MyListener).SOME_CONSTANT + '-' + '${my.app.env:}'}")
Here's a sample application with this solution:
package org.spring.kafka.playground;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.core.KafkaOperations;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.messaging.handler.annotation.Header;
import org.springframework.stereotype.Component;

@SpringBootApplication
public class SO71291726 {

    public static void main(String[] args) {
        ConfigurableApplicationContext context = SpringApplication.run(SO71291726.class, args);
        try {
            Thread.sleep(10000);
        }
        catch (InterruptedException e) {
            Thread.interrupted();
            throw new RuntimeException("Interrupted");
        }
        KafkaOperations kafkaTemplate = context.getBean("kafkaTemplate", KafkaOperations.class);
        kafkaTemplate.send("topic", "My message");
    }

    Logger log = LoggerFactory.getLogger(this.getClass());

    public static final String SOME_CONSTANT = "my-group-id-constant";

    @Component
    class MyListener {

        @KafkaListener(topics = "topic", groupId = "#{ '${71291726.my.app.env:}' == '' ? T(org.spring.kafka.playground.SO71291726).SOME_CONSTANT : T(org.spring.kafka.playground.SO71291726).SOME_CONSTANT + '-' + '${71291726.my.app.env:}'}")
        void listen(String message, @Header(KafkaHeaders.GROUP_ID) String groupId) {
            log.info("Received message {} from group id {} ", message, groupId);
        }
    }
}
Output:
2022-02-28 14:26:14.733 INFO 18841 --- [ntainer#0-0-C-1] 1291726$$EnhancerBySpringCGLIB$$cf264156 : Received message My message from group id my-group-id-constant
If I add 71291726.my.app.env = TEST to the application.properties file:
2022-02-28 14:34:03.900 INFO 18870 --- [ntainer#0-0-C-1] 1291726$$EnhancerBySpringCGLIB$$e1a5933e : Received message My message from group id my-group-id-constant-TEST

Trouble with Resilience4j Retry and "java.net.http.HttpClient" working together

I'm trying to get a basic HttpClient / HttpRequest / HttpResponse flow working with Resilience4j Retry.
The verbatim code from https://resilience4j.readme.io/docs/retry:
RetryConfig config = RetryConfig.custom()
    .maxAttempts(5)
    .waitDuration(Duration.ofMillis(1000))
    .retryOnResult(response -> response.getStatus() == 500)
    .retryOnException(e -> e instanceof WebServiceException)
    .retryExceptions(IOException.class, TimeoutException.class)
    .ignoreExceptions(BusinessException.class, OtherBusinessException.class)
    .build();

// Create a RetryRegistry with a custom global configuration
RetryRegistry registry = RetryRegistry.of(config);

// Get or create a Retry from the registry -
// Retry will be backed by the default config
Retry retryWithDefaultConfig = registry.retry("name1");
Note that their code above misses defining the generic T; it needs something like:
RetryConfig config = RetryConfig.<MyConcrete>custom()
And the verbatim code from https://resilience4j.readme.io/docs/examples:
Supplier<String> supplierWithResultAndExceptionHandler = SupplierUtils
    .andThen(supplier, (result, exception) -> "Hello Recovery");

Supplier<HttpResponse> supplier = () -> httpClient.doRemoteCall();
Supplier<HttpResponse> supplierWithResultHandling = SupplierUtils.andThen(supplier, result -> {
    if (result.getStatusCode() == 400) {
        throw new ClientException();
    } else if (result.getStatusCode() == 500) {
        throw new ServerException();
    }
    return result;
});
HttpResponse httpResponse = circuitBreaker
    .executeSupplier(supplierWithResultHandling);
======
So, using those two partials, I've come up with the code below.
Note that I am using the real java.net.http.HttpClient and java.net.http.HttpResponse (from JDK 11).
import io.github.resilience4j.core.SupplierUtils;
import io.github.resilience4j.retry.Retry;
import io.github.resilience4j.retry.RetryConfig;
import io.github.resilience4j.retry.RetryRegistry;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.http.HttpStatus;

import javax.inject.Inject;
import java.io.IOException;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.time.Duration;
import java.util.concurrent.TimeoutException;
import java.util.function.Supplier;

public final class ResilientHttpClient /* implements IResilientHttpClient */ {

    private static Logger logger;

    private final HttpClient httpClient;

    @Inject
    public ResilientHttpClient(final HttpClient httpClient) {
        this(LoggerFactory.getLogger(ResilientHttpClient.class), httpClient);
    }

    /**
     * Constructor, which pre-populates the provider with one resource instance.
     */
    public ResilientHttpClient(final Logger lgr, final HttpClient httpClient) {
        if (null == lgr) {
            throw new IllegalArgumentException("Logger is null");
        }
        this.logger = lgr;
        if (null == httpClient) {
            throw new IllegalArgumentException("HttpClient is null");
        }
        this.httpClient = httpClient;
    }

    public String executeHttpRequest(String circuitbreakerInstanceName, HttpRequest httpRequest) {
        try {
            /* circuitbreakerInstanceName is a future placeholder for .yml configuration, see: https://resilience4j.readme.io/docs/getting-started-3 */
            RetryConfig config = RetryConfig.<HttpResponse>custom()
                    .waitDuration(Duration.ofMillis(1000))
                    .retryOnResult(response -> response.statusCode() == 500)
                    .retryOnException(e -> e instanceof ArithmeticException)
                    .retryExceptions(IOException.class, TimeoutException.class)
                    //.ignoreExceptions(BusinessException.class, OtherBusinessException.class)
                    .build();

            // Create a RetryRegistry with a custom global configuration
            RetryRegistry registry = RetryRegistry.of(config);

            // Get or create a Retry from the registry -
            // Retry will be backed by the default config
            Retry retryWithDefaultConfig = registry.retry(circuitbreakerInstanceName);

            Supplier<HttpResponse> supplier = () -> this.httpClient.send(httpRequest, HttpResponse.BodyHandlers.ofString());

            Supplier<String> supplierWithResultAndExceptionHandler = SupplierUtils
                    .andThen(supplier, (result, exception) -> "Hello Recovery");

            Supplier<HttpResponse> supplierWithResultHandling = SupplierUtils.andThen(supplier, result -> {
                if (result.statusCode() == HttpStatus.BAD_REQUEST.value()) {
                    throw new RuntimeException("400");
                } else if (result.statusCode() == HttpStatus.INTERNAL_SERVER_ERROR.value()) {
                    throw new RuntimeException("500");
                }
                return result;
            });

            HttpResponse<String> response = retryWithDefaultConfig.executeSupplier(supplierWithResultHandling);
            String responseBody = response.body();
            return responseBody;
        } catch (Exception ex) {
            throw new RuntimeException((ex));
        }
    }
}
The issue I am having is that the line
Supplier<HttpResponse> supplier = () -> this.httpClient.send(httpRequest, HttpResponse.BodyHandlers.ofString());
is giving an error (in IntelliJ) of "unhandled exceptions: IOException, InterruptedException".
Modifying the method to
public String executeHttpRequest(String circuitbreakerInstanceName, HttpRequest httpRequest) throws IOException, InterruptedException {
"feels wrong". But even when I try it, it doesn't resolve anything. :(
It is probably some lambda checked-exception voodoo.
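(As far as I can tell, that is exactly it: java.util.function.Supplier.get() declares no checked exceptions, so a lambda body that throws IOException or InterruptedException will not compile, no matter what the enclosing method declares. A minimal workaround sketch, my own and not from the Resilience4j docs, assuming the same httpClient and httpRequest as above, is to catch and rethrow unchecked inside the lambda:)
Supplier<HttpResponse<String>> supplier = () -> {
    try {
        return this.httpClient.send(httpRequest, HttpResponse.BodyHandlers.ofString());
    } catch (IOException e) {
        throw new java.io.UncheckedIOException(e); // rethrow unchecked so the lambda compiles
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // restore the interrupt flag
        throw new RuntimeException(e);
    }
};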
But more to the point: I don't know if the way I've brought the two partials together is even correct. The samples are a little lacking in the fully-working department.
Thanks for any help. Getting a basic HttpClient to retry a few times shouldn't be this hard, but I'm hitting my head against the wall.
My Gradle dependencies:
dependencies {
    implementation group: 'javax.inject', name: 'javax.inject', version: javaxInjectVersion
    implementation group: 'org.slf4j', name: 'slf4j-api', version: slf4jVersion
    implementation group: 'org.springframework', name: 'spring-web', version: springWebVersion
    implementation "io.github.resilience4j:resilience4j-circuitbreaker:${resilience4jVersion}"
    implementation "io.github.resilience4j:resilience4j-ratelimiter:${resilience4jVersion}"
    implementation "io.github.resilience4j:resilience4j-retry:${resilience4jVersion}"
    implementation "io.github.resilience4j:resilience4j-bulkhead:${resilience4jVersion}"
    implementation "io.github.resilience4j:resilience4j-cache:${resilience4jVersion}"
    implementation "io.github.resilience4j:resilience4j-timelimiter:${resilience4jVersion}"
    testCompile group: 'junit', name: 'junit', version: junitVersion
}
and
resilience4jVersion = '1.5.0'
slf4jVersion = "1.7.30"
javaxInjectVersion = "1"
springWebVersion = '5.2.8.RELEASE'
junitVersion = "4.12"
Just out of interest:
Which Java version are you using? Java 11?
Why can't you use Spring Boot? The Resilience4j Spring Boot starter simplifies the configuration a lot.
If you configure retryOnResult(response -> response.statusCode() == 500), you don't have to use SupplierUtils anymore to map an HttpResponse with a certain status code to a runtime exception:
RetryConfig config = RetryConfig.<HttpResponse<String>>custom()
    .waitDuration(Duration.ofMillis(1000))
    .retryOnResult(response -> response.statusCode() == 500)
    .retryExceptions(IOException.class, TimeoutException.class)
    .build();
Please don't create registries and configs inside executeHttpRequest; inject them into your constructor instead.
You can create a static method like this:
public static <T> HttpResponse<T> executeHttpRequest(Callable<HttpResponse<T>> callable, Retry retry, CircuitBreaker circuitBreaker) throws Exception {
    return Decorators.ofCallable(callable)
        .withRetry(retry)
        .withCircuitBreaker(circuitBreaker)
        .call();
}
and invoke the method as follows:
HttpResponse<String> response = executeHttpRequest(
    () -> httpClient.send(request, HttpResponse.BodyHandlers.ofString()),
    retry,
    circuitBreaker);
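Putting those pieces together, a sketch of the refactored class might look like this (my own assembly of the answer's suggestions, not code from the answer itself; the Decorators class ships in the resilience4j-all module, and the Retry is assumed to be built once from the RetryConfig above and injected):
import io.github.resilience4j.decorators.Decorators;
import io.github.resilience4j.retry.Retry;

import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.util.concurrent.Callable;

public final class ResilientHttpClient {

    private final HttpClient httpClient;
    private final Retry retry; // built once at wiring time and injected

    public ResilientHttpClient(final HttpClient httpClient, final Retry retry) {
        this.httpClient = httpClient;
        this.retry = retry;
    }

    public String executeHttpRequest(final HttpRequest httpRequest) throws Exception {
        // Callable.call() is declared to throw Exception, so HttpClient.send
        // needs no try/catch wrapping inside the lambda.
        Callable<HttpResponse<String>> callable =
                () -> httpClient.send(httpRequest, HttpResponse.BodyHandlers.ofString());
        return Decorators.ofCallable(callable)
                .withRetry(retry)
                .call()
                .body();
    }
}
This also dissolves the "unhandled exceptions" problem from the question, since the checked exceptions simply propagate through call().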

Spring Boot test Kafka

I'm using Spring Boot version 2.1.8.RELEASE, and I have this problem.
Do you have a solution, please?
[Thread-2] ERROR o.a.k.t.TestUtils - Error deleting C:\Users\usr\AppData\Local\Temp\kafka-255644115154741962
java.nio.file.FileSystemException: C:\Users\usr\AppData\Local\Temp\kafka-255644115154741962\version-2\log.1:
The process cannot access the file because it is being used by another process.
at sun.nio.fs.WindowsException.translateToIOException(WindowsException.java:86)
at sun.nio.fs.WindowsException.rethrowAsIOException(WindowsException.java:97)
at sun.nio.fs.WindowsException.rethrowAsIOException(WindowsException.java:102)
at sun.nio.fs.WindowsFileSystemProvider.implDelete(WindowsFileSystemProvider.java:269)
at sun.nio.fs.AbstractFileSystemProvider.delete(AbstractFileSystemProvider.java:103)
at java.nio.file.Files.delete(Files.java:1126)
at org.apache.kafka.common.utils.Utils$2.visitFile(Utils.java:734)
at org.apache.kafka.common.utils.Utils$2.visitFile(Utils.java:723)
at java.nio.file.Files.walkFileTree(Files.java:2670)
at java.nio.file.Files.walkFileTree(Files.java:2742)
at org.apache.kafka.common.utils.Utils.delete(Utils.java:723)
at org.apache.kafka.test.TestUtils$1.run(TestUtils.java:184)
This is my test (I am using Windows 10 as the OS):
import org.I0Itec.zkclient.ZkClient;
import org.junit.Test;

import kafka.utils.ZKStringSerializer$;
import kafka.utils.ZkUtils;
import kafka.zk.EmbeddedZookeeper;

public class BaseTest {

    private static final String ZKHOST = "127.0.0.1";

    @Test
    public void producerTest() {
        // setup Zookeeper
        EmbeddedZookeeper zkServer = new EmbeddedZookeeper();
        String zkConnect = ZKHOST + ":" + zkServer.port();
        ZkClient zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$);
        ZkUtils zkUtils = ZkUtils.apply(zkClient, false);
        zkClient.close();
        zkServer.shutdown();
    }
}

Is putting sqs-consumer to detect receiveMessage events in SQS scalable?

I am using AWS SQS as a message queue. After sqs.sendMessage sends the data, I want to detect it via sqs.receiveMessage using either an infinite loop or event triggering, in a scalable way. Then I came across sqs-consumer,
which handles sqs.receiveMessage events the moment a message is received. But I was wondering: is this the most suitable way to handle message passing between microservices, or is there a better way to handle it?
I had written code in Java for fetching data from an SQS queue with the SQSBufferedAsyncClient; the advantage of using this API is that it buffers the messages in async mode.
/**
 *
 */
package com.sxm.aota.tsc.config;

import java.net.UnknownHostException;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import com.amazonaws.AmazonClientException;
import com.amazonaws.AmazonWebServiceRequest;
import com.amazonaws.ClientConfiguration;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.auth.InstanceProfileCredentialsProvider;
import com.amazonaws.regions.Region;
import com.amazonaws.regions.Regions;
import com.amazonaws.retry.RetryPolicy;
import com.amazonaws.retry.RetryPolicy.BackoffStrategy;
import com.amazonaws.services.sqs.AmazonSQSAsync;
import com.amazonaws.services.sqs.AmazonSQSAsyncClient;
import com.amazonaws.services.sqs.buffered.AmazonSQSBufferedAsyncClient;
import com.amazonaws.services.sqs.buffered.QueueBufferConfig;

@Configuration
public class SQSConfiguration {

    /** The properties cache config. */
    @Autowired
    private PropertiesCacheConfig propertiesCacheConfig;

    @Bean
    public AmazonSQSAsync amazonSQSClient() {
        // Create client configuration
        ClientConfiguration clientConfig = new ClientConfiguration()
                .withMaxErrorRetry(5)
                .withConnectionTTL(10_000L)
                .withTcpKeepAlive(true)
                .withRetryPolicy(new RetryPolicy(
                        null,
                        new BackoffStrategy() {
                            @Override
                            public long delayBeforeNextRetry(AmazonWebServiceRequest req,
                                    AmazonClientException exception, int retries) {
                                // Delay between retries is 10s unless it is UnknownHostException,
                                // for which the retry delay is 60s
                                return exception.getCause() instanceof UnknownHostException ? 60_000L : 10_000L;
                            }
                        }, 10, true));

        // Create Amazon client
        AmazonSQSAsync asyncSqsClient = null;
        if (propertiesCacheConfig.isIamRole()) {
            asyncSqsClient = new AmazonSQSAsyncClient(new InstanceProfileCredentialsProvider(true), clientConfig);
        } else {
            asyncSqsClient = new AmazonSQSAsyncClient(
                    new BasicAWSCredentials("sceretkey", "accesskey"));
        }
        final Regions regions = Regions.fromName(propertiesCacheConfig.getRegionName());
        asyncSqsClient.setRegion(Region.getRegion(regions));
        asyncSqsClient.setEndpoint(propertiesCacheConfig.getEndPoint());

        // Buffer for request batching
        final QueueBufferConfig bufferConfig = new QueueBufferConfig();
        // Ensure visibility timeout is maintained
        bufferConfig.setVisibilityTimeoutSeconds(20);
        // Enable long polling
        bufferConfig.setLongPoll(true);
        // Set batch parameters
        // bufferConfig.setMaxBatchOpenMs(500);
        // Set to receive messages only on demand
        // bufferConfig.setMaxDoneReceiveBatches(0);
        // bufferConfig.setMaxInflightReceiveBatches(0);
        return new AmazonSQSBufferedAsyncClient(asyncSqsClient, bufferConfig);
    }
}
Then I wrote the scheduler, which executes every 2 seconds, fetches the data from the queue, processes it, and deletes it from the queue before the visibility timeout; otherwise the message becomes visible for processing again when the visibility timeout expires.
package com.sxm.aota.tsc.sqs;

import java.util.List;
import java.util.concurrent.CountDownLatch;

import javax.annotation.PostConstruct;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.DependsOn;
import org.springframework.scheduling.annotation.EnableScheduling;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;

import com.amazonaws.services.sqs.AmazonSQSAsync;
import com.amazonaws.services.sqs.model.DeleteMessageRequest;
import com.amazonaws.services.sqs.model.GetQueueUrlRequest;
import com.amazonaws.services.sqs.model.GetQueueUrlResult;
import com.amazonaws.services.sqs.model.ReceiveMessageRequest;
import com.amazonaws.services.sqs.model.ReceiveMessageResult;
import com.fasterxml.jackson.databind.ObjectMapper;

/**
 * The Class TSCDataSenderScheduledTask.
 *
 * Sends the aggregated Vehicle data to TSC in batches
 */
@EnableScheduling
@Component("sqsScheduledTask")
@DependsOn({ "propertiesCacheConfig", "amazonSQSClient" })
public class SQSScheduledTask {

    private static final Logger LOGGER = LoggerFactory.getLogger(SQSScheduledTask.class);

    @Autowired
    private PropertiesCacheConfig propertiesCacheConfig;

    @Autowired
    public AmazonSQSAsync amazonSQSClient;

    /**
     * Timer task that will run after a specific interval of time, majorly
     * responsible for sending the data in batches to TSC.
     */
    private String queueUrl;

    private final ObjectMapper mapper = new ObjectMapper();

    @PostConstruct
    public void initialize() throws Exception {
        LOGGER.info("SQS-Publisher", "Publisher initializing for queue " + propertiesCacheConfig.getSQSQueueName(),
                "Publisher initializing for queue " + propertiesCacheConfig.getSQSQueueName());
        // Get queue URL
        final GetQueueUrlRequest request = new GetQueueUrlRequest().withQueueName(propertiesCacheConfig.getSQSQueueName());
        final GetQueueUrlResult response = amazonSQSClient.getQueueUrl(request);
        queueUrl = response.getQueueUrl();
        LOGGER.info("SQS-Publisher", "Publisher initialized for queue " + propertiesCacheConfig.getSQSQueueName(),
                "Publisher initialized for queue " + propertiesCacheConfig.getSQSQueueName() + ", URL = " + queueUrl);
    }

    @Scheduled(fixedDelayString = "${sqs.consumer.delay}")
    public void timerTask() {
        final ReceiveMessageResult receiveResult = getMessagesFromSQS();
        String messageBody = null;
        if (receiveResult != null && receiveResult.getMessages() != null && !receiveResult.getMessages().isEmpty()) {
            try {
                messageBody = receiveResult.getMessages().get(0).getBody();
                String messageReceiptHandle = receiveResult.getMessages().get(0).getReceiptHandle();
                Vehicles vehicles = mapper.readValue(messageBody, Vehicles.class);
                processMessage(vehicles.getVehicles(), messageReceiptHandle);
            } catch (Exception e) {
                LOGGER.error("Exception while processing SQS message : {}", messageBody);
                // Message is not deleted on SQS and will be processed again after the visibility timeout
            }
        }
    }

    public void processMessage(List<Vehicle> vehicles, String messageReceiptHandle) throws InterruptedException {
        // processing code
        // Delete the SQS message once the processing is completed.
        // Need to create an atomic counter that will be incremented by all TS.. Once it is 0, we will delete the message.
        amazonSQSClient.deleteMessage(new DeleteMessageRequest(queueUrl, messageReceiptHandle));
    }

    private ReceiveMessageResult getMessagesFromSQS() {
        try {
            // Create a new request and fetch data from the Amazon SQS queue
            final ReceiveMessageResult receiveResult = amazonSQSClient
                    .receiveMessage(new ReceiveMessageRequest().withMaxNumberOfMessages(1).withQueueUrl(queueUrl));
            return receiveResult;
        } catch (Exception e) {
            LOGGER.error("Error while fetching data from SQS", e);
        }
        return null;
    }
}
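One small note (my addition): the @Scheduled(fixedDelayString = "${sqs.consumer.delay}") annotation needs that property defined somewhere for the every-2-seconds cadence described above, presumably along these lines:
# application.properties (assumed location; value in milliseconds)
sqs.consumer.delay=2000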

Push/pull on Couchbase Server side through Couchbase Lite client side

I have tried to create a small Java program to handle a Couchbase Lite database and to do push/pull operations.
The scenario in depth is as follows:
I created a bucket named sync_gateway,
and connected it with Couchbase Server using the config.json below:
{
  "interface": ":4984",
  "adminInterface": ":4985",
  "databases": {
    "db": {
      "server": "http://localhost:8091",
      "bucket": "sync_gateway",
      "sync": function(doc) {
        channel(doc.channels);
      }
    }
  }
}
This created metadata in the sync_gateway bucket on the server.
Then I wrote sample Java code for the local Couchbase Lite (CBL) database, with functions for push/pull operations.
Code:
package com.Testing_couchbaseLite;

import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.HashMap;
import java.util.Map;

import javax.naming.ldap.ManageReferralControl;

import org.apache.http.cookie.Cookie;

import com.couchbase.lite.Context;
import com.couchbase.lite.CouchbaseLiteException;
import com.couchbase.lite.Database;
import com.couchbase.lite.Document;
import com.couchbase.lite.JavaContext;
import com.couchbase.lite.Manager;
import com.couchbase.lite.ManagerOptions;
import com.couchbase.lite.QueryOptions;
import com.couchbase.lite.replicator.Replication;
import com.couchbase.lite.support.HttpClientFactory;

public class Test_syncGateWay {

    private URL createSyncURL(boolean isEncrypted) {
        URL syncURL = null;
        String host = "https://localhost"; // Sync Gateway IP
        String port = "4984";              // Sync Gateway port
        String dbName = "db";
        try {
            syncURL = new URL(host + ":" + port + "/" + dbName);
        } catch (MalformedURLException me) {
            me.printStackTrace();
        }
        return syncURL;
    }

    private void startReplications() throws CouchbaseLiteException {
        try {
            Map<String, Object> map = new HashMap<String, Object>();
            map.put("id", "1");
            map.put("name", "ram");
            Manager man = new Manager(new JavaContext(), Manager.DEFAULT_OPTIONS);
            Database db = man.getDatabase("sync_gateway");
            Document doc = db.createDocument();
            doc.putProperties(map);
            System.out.println("-------------done------------");
            System.out.println(man.getAllDatabaseNames());
            System.out.println(man.getDatabase("sync_gateway").getDocumentCount());
            System.out.println(db.getDocument("1").getCurrentRevisionId());
            System.out.println(db.exists());

            Replication pull = db.createPullReplication(this.createSyncURL(true));
            Replication push = db.createPushReplication(this.createSyncURL(true));
            pull.setContinuous(true);
            push.setContinuous(true);
            pull.start();
            push.start();
        } catch (IOException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
        }
    }

    private void createDatabase() throws CouchbaseLiteException, IOException {
        // TODO Auto-generated method stub
    }

    public static void main(String[] args) throws CouchbaseLiteException, IOException {
        new Test_syncGateWay().startReplications();
    }
}
Now I am starting Sync Gateway with that config file and running the Java code to create a document on CBL and on the Couchbase server via push/pull operations.
But it is showing this error:
Jul 08, 2016 10:27:21 AM com.couchbase.lite.util.SystemLogger e
SEVERE: RemoteRequest: RemoteRequest{GET, https://localhost:4984/db/_local/2eafda901c4de2fe022af262d5cc7d1c0cb5c2d2}: executeRequest() Exception: javax.net.ssl.SSLPeerUnverifiedException: peer not authenticated. url: https://localhost:4984/db/_local/2eafda901c4de2fe022af262d5cc7d1c0cb5c2d2
So is there any misunderstanding in my concept? And how do I resolve this problem?
You have not set up your Sync Gateway for SSL. You need to add the SSLCert and SSLPass keys to your config file.
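For illustration, the top of the config might then look something like this (my sketch, not from the answer: the key names are the ones the answer gives, the file names are placeholders, and the sync function from above is omitted for brevity):
{
  "interface": ":4984",
  "adminInterface": ":4985",
  "SSLCert": "cert.pem",
  "SSLPass": "privkey.pem",
  "databases": {
    "db": {
      "server": "http://localhost:8091",
      "bucket": "sync_gateway"
    }
  }
}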
