Flux Reactive Microservice - one microservice signals another microservice about backpressure - spring-boot

Two Spring Boot microservices need to interact with each other to implement a mass-emailing feature. Microservice 1, CollectorService, collects information about clients, and
Microservice 2, NotificationService, sends documents via an SMTP service. NotificationService is unable to handle the load that CollectorService produces, so I want to understand how NotificationService can signal CollectorService to slow down. My code currently looks like this.
CollectorService calling NotificationService
final WebClient client = WebClient.builder().baseUrl(BASE_URL.get()).defaultHeaders(httpHeaders -> {
httpHeaders.set("requestId", requestId);
}).build();
Mono<List> responseFlux = client.post().uri(EXTERNAL_API_PATH.get() + "/withattFluxBackPressure")
.contentType(MediaType.APPLICATION_JSON)
.body(BodyInserters.fromValue(message)).retrieve().bodyToMono(List.class);
NotificationService processing requests
@PostMapping(value = "/externalNotification/withattFluxBackPressure")
public List<NotificationResponse> sendMailMessageWAttFlux(@RequestBody List<EmailTemplate> mailMessages) throws Exception{
log.info("Processing email sending request async...");
// byte[] byteArrayResource = get file content from S3
List<NotificationResponse> response = new ArrayList<>();
Flux<NotificationResponse> responseFlux = Flux.fromIterable(mailMessages).flatMap(messages -> {
try {
return Flux.just(notificationService.sendMailWAtt(messages, byteArrayResource).map(NotificationResponse::error).orElse(NotificationResponse.success()));
} catch (IOException e) {
log.error("Exception", e);
return Flux.just(new NotificationResponse().error(e.getMessage()));
}
});
responseFlux.subscribe(new BaseSubscriber<NotificationResponse>() {
@Override
protected void hookOnSubscribe(Subscription subscription) {
log.info("called hookOnSubscribe......");
subscription.request(1);
}
@Override
protected void hookOnNext(NotificationResponse value) {
log.info("called hookOnNext.......{} ", value);
response.add(value);
request(1);
}
@Override
protected void hookOnComplete() {
log.info("called hookOnComplete.......");
}
@Override
protected void hookOnError(Throwable throwable) {
log.info("called hookOnError.......");
if(throwable instanceof ConnectException) {
log.error("called ConnectException.......");
}
if(throwable instanceof ResourceAccessException) {
log.error("called ResourceAccessException.......");
}
if(throwable instanceof ConnectTimeoutException) {
log.error("called ConnectTimeoutException.......");
}
if(throwable instanceof io.netty.channel.ConnectTimeoutException) {
log.error("called netty ConnectTimeoutException.......");
}
}
});
return response;
}
1. When NotificationService is overloaded, how can it signal (apply backpressure to) CollectorService so that it slows down? (Ideal scenario)
2. Alternatively, can NotificationService process 5 emails and then signal/request CollectorService for the next 5? (See the sketch below.)
Thanks for your time!
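For illustration only (not part of the original post): true end-to-end backpressure over HTTP generally needs a streaming exchange (for example, the controller returning Flux<NotificationResponse> with a streaming media type), so that Reactor's request(n) demand translates into not reading further from the socket. A simpler approximation of option 2 on the CollectorService side is to batch the templates and wait for each response before sending the next batch. A minimal sketch, assuming the same client and EXTERNAL_API_PATH as above, a hypothetical collectedTemplates list, and that NotificationResponse is a shared DTO:
// Sketch only: buffer(5) groups the templates into batches of five, and concatMap
// subscribes to one HTTP call at a time, so the next batch is only sent after
// NotificationService has answered the previous one.
Flux<NotificationResponse> responses = Flux.fromIterable(collectedTemplates)
        .buffer(5)
        .concatMap(batch -> client.post()
                .uri(EXTERNAL_API_PATH.get() + "/withattFluxBackPressure")
                .contentType(MediaType.APPLICATION_JSON)
                .body(BodyInserters.fromValue(batch))
                .retrieve()
                .bodyToFlux(NotificationResponse.class));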

Related

Decent approach to time OkHttpClient events for metrics?

I'd like to gather connection and request timing metrics for an OkHttpClient instance that calls a particular service. I'm wondering if this approach is correct, and whether my interpretation of the event types makes sense?
Timer callTimer = <new codahale timer>;
Timer connectTimer = <new codahale timer>;
Timer secureConnectTimer = <new codahale timer>;
Timer requestTimer = <new codahale timer>;
// this gets registered with my client
new EventListener() {
// see https://square.github.io/okhttp/events/#eventlistener for info on the ordering of these events
private final Map<Call, Timer.Context> secureConnectTimerContexts = Maps.newConcurrentMap();
private final Map<Call, Timer.Context> connectTimerContexts = Maps.newConcurrentMap();
private final Map<Call, Timer.Context> callTimerContexts = Maps.newConcurrentMap();
private final Map<Call, Timer.Context> requestTimerContexts = Maps.newConcurrentMap();
@Override
public void secureConnectStart(Call call) {
secureConnectTimerContexts.put(call, secureConnectTimer.time());
}
@Override
public void secureConnectEnd(Call call, @Nullable Handshake handshake) {
Timer.Context context = secureConnectTimerContexts.remove(call);
if (Objects.nonNull(context)) {
context.stop();
}
}
@Override
public void connectStart(Call call, InetSocketAddress inetSocketAddress, Proxy proxy) {
connectTimerContexts.put(call, connectTimer.time());
}
@Override
public void connectEnd(Call call, InetSocketAddress inetSocketAddress, Proxy proxy, @Nullable Protocol protocol) {
Timer.Context context = connectTimerContexts.remove(call);
if (Objects.nonNull(context)) {
context.stop();
}
}
@Override
public void connectionAcquired(Call call, Connection connection) {
requestTimerContexts.put(call, requestTimer.time());
}
@Override
public void connectionReleased(Call call, Connection connection) {
Timer.Context context = requestTimerContexts.remove(call);
if (context != null) {
context.stop();
}
}
@Override
public void connectFailed(Call call, InetSocketAddress inetSocketAddress, Proxy proxy,
@Nullable Protocol protocol, IOException ioe) {
Timer.Context context = connectTimerContexts.remove(call);
if (Objects.nonNull(context)) {
context.stop();
}
}
@Override
public void callStart(Call call) {
callTimerContexts.put(call, callTimer.time());
}
@Override
public void callEnd(Call call) {
callFinishedForMetrics(call);
}
@Override
public void callFailed(Call call, IOException ioe) {
callFinishedForMetrics(call);
}
private void callFinishedForMetrics(Call call) {
Timer.Context callTimerContext = callTimerContexts.remove(call);
if (callTimerContext != null) {
callTimerContext.stop();
}
requestTimerContexts.remove(call);
secureConnectTimerContexts.remove(call);
connectTimerContexts.remove(call);
}
}
You can use EventListener.Factory to create a unique listener instance for each Call. That way you don't need all the maps; the Timer.Context objects can just be instance fields of the call-bound EventListener.
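For illustration (not part of the original answer), a minimal sketch of that per-call listener, trimmed to the call and connect timers and assuming the same codahale Timer fields and okhttp3 imports as in the question:
OkHttpClient client = new OkHttpClient.Builder()
        .eventListenerFactory(okCall -> new EventListener() {
            // One listener instance per Call, so the contexts can be plain fields.
            private Timer.Context callContext;
            private Timer.Context connectContext;
            @Override
            public void callStart(Call call) {
                callContext = callTimer.time();
            }
            @Override
            public void connectStart(Call call, InetSocketAddress inetSocketAddress, Proxy proxy) {
                connectContext = connectTimer.time();
            }
            @Override
            public void connectEnd(Call call, InetSocketAddress inetSocketAddress, Proxy proxy, Protocol protocol) {
                if (connectContext != null) connectContext.stop();
            }
            @Override
            public void callEnd(Call call) {
                if (callContext != null) callContext.stop();
            }
            @Override
            public void callFailed(Call call, IOException ioe) {
                if (callContext != null) callContext.stop();
            }
        })
        .build();
The secureConnect and request timers can be handled the same way; because each listener is bound to a single Call, no concurrent maps are needed.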

PubSub with Spring: how to know whether the message is published or not?

My publisher code looks like this:
public abstract class PubSubPublisher {
private static final Logger LOGGER = LoggerFactory.getLogger(PubSubPublisher.class);
private final PubSubTemplate pubSubTemplate;
protected PubSubPublisher(PubSubTemplate pubSubTemplate) {
this.pubSubTemplate = pubSubTemplate;
}
protected abstract String topic(String topicName);
public void publish(String topicName, String message) throws StatusRuntimeException {
LOGGER.info("Publishing to topic [{}]. Message: [{}]", topicName, message);
pubSubTemplate.publish(topicName, message);
}
}
My Component
@Component
public class HelloPubSubPublisher extends PubSubPublisher {
@Autowired
public HelloPubSubPublisher(PubSubTemplate pubSubTemplate) throws StatusRuntimeException{
super(pubSubTemplate);
}
@Override
protected String topic(String topicName) {
return topicName;
}
}
Now, in my service layer, how do I find out whether I successfully published the message to the topic or not? Note that all the Google APIs I am using are async.
try {
publisher.publish(topicName, payload);
}catch (Exception e) {
LOGGER.error("ioException occured: "+e);
throw new TopicNotFoundException();
}
Unfortunately, I am not able to capture any error; execution never enters the catch block.
Ultimately, I want to know whether the code pushed the message to the topic; if not, I have to log it and throw the error to the client, which is not happening with my current code even with proper exception handling.
Any help or guidance is appreciated, thanks.
Using publish() you should be able to capture a future that lets you check whether the message was published or not.
There is an example of this in Google's Pub/Sub documentation:
// Once published, returns a server-assigned message id (unique within the topic)
ApiFuture<String> future = publisher.publish(pubsubMessage);
// Add an asynchronous callback to handle success / failure
ApiFutures.addCallback(
future,
new ApiFutureCallback<String>() {
@Override
public void onFailure(Throwable throwable) {
if (throwable instanceof ApiException) {
ApiException apiException = ((ApiException) throwable);
// details on the API exception
System.out.println(apiException.getStatusCode().getCode());
System.out.println(apiException.isRetryable());
}
System.out.println("Error publishing message : " + message);
}
@Override
public void onSuccess(String messageId) {
// Once published, returns server-assigned message ids (unique within the topic)
System.out.println(messageId);
}
},
MoreExecutors.directExecutor());
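Not from the original answer: since the question goes through Spring's PubSubTemplate rather than the raw Publisher client, the same idea can be applied to the template. PubSubTemplate.publish() returns a future (ListenableFuture or CompletableFuture, depending on the Spring Cloud GCP version), so a simple, if blocking, sketch of the publish method from the question would be:
public void publish(String topicName, String message) {
    LOGGER.info("Publishing to topic [{}]. Message: [{}]", topicName, message);
    try {
        // get() blocks until the broker acknowledges the message or the publish fails;
        // attach a callback instead (as in the snippet above) if blocking is not acceptable.
        String messageId = pubSubTemplate.publish(topicName, message).get();
        LOGGER.info("Published, message id [{}]", messageId);
    } catch (Exception e) {
        LOGGER.error("Publish failed for topic [{}]", topicName, e);
        throw new TopicNotFoundException(); // TopicNotFoundException as used in the question's service layer
    }
}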

Netty Parallel Handler Processing

Following recommendations elsewhere, I am attempting to parallelize the final inbound handler in my Netty pipeline, as follows:
public final class EchoServer {
private EventLoopGroup group = new NioEventLoopGroup();
private UnorderedThreadPoolEventExecutor workers = new UnorderedThreadPoolEventExecutor(10);
public void start(int port) throws InterruptedException {
try {
Bootstrap b = new Bootstrap();
b.group(group).channel(NioDatagramChannel.class).option(ChannelOption.SO_BROADCAST, true)
.handler(new ChannelInitializer<NioDatagramChannel>() {
@Override
protected void initChannel(NioDatagramChannel channel) throws Exception {
channel.pipeline().addLast(workers, new SimpleChannelInboundHandler<DatagramPacket>() {
@Override
public void channelRead0(ChannelHandlerContext ctx, DatagramPacket packet) throws Exception {
System.err.println(packet);
// Simulated database delay that I have to wait for before responding
Thread.sleep(1000);
ctx.write(new DatagramPacket(Unpooled.copiedBuffer("goodbye", StandardCharsets.ISO_8859_1), packet.sender()));
}
@Override
public void channelReadComplete(ChannelHandlerContext ctx) {
ctx.flush();
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
cause.printStackTrace();
}
});
}
});
b.bind(port).sync().channel().closeFuture().await();
} finally {
group.shutdownGracefully();
}
}
public void stop() {
group.shutdownGracefully();
}
}
I have ten clients that connect concurrently, as a test, and I am measuring the execution time for handling all the requests. As expected, with the 1-second delay and sequential execution it takes just over 10 seconds. I am trying to get execution down to somewhere under 2 seconds to prove the handling is parallel.
From what I understand, adding the handler to the pipeline with an explicitly assigned executor is supposed to parallelize that handler's work across the threads in the executor.
Instead of seeing an increase in performance, I am finding that my client does not receive the responses when I add the parallel processing. The Thread.sleep is there to simulate the time it will take to write the incoming data to a database. Am I doing something obviously wrong here?
I worked around the apparent lack of Netty support for doing the final stage of UDP processing in parallel by using standard Java concurrency mechanisms.
public final class EchoServer {
private EventLoopGroup group = new NioEventLoopGroup();
private ExecutorService executors = Executors.newFixedThreadPool(10);
public void start(int port) throws InterruptedException {
try {
Bootstrap b = new Bootstrap();
b.group(group).channel(NioDatagramChannel.class).handler(new ChannelInitializer<NioDatagramChannel>() {
@Override
protected void initChannel(NioDatagramChannel channel) throws Exception {
channel.pipeline().addLast(new SimpleChannelInboundHandler<DatagramPacket>() {
@Override
public void channelRead0(ChannelHandlerContext ctx, DatagramPacket packet) throws Exception {
CompletableFuture.runAsync(() -> {
System.err.println(packet);
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
e.printStackTrace();
}
ctx.writeAndFlush(new DatagramPacket(Unpooled.copiedBuffer("goodbye", StandardCharsets.ISO_8859_1),
packet.sender()));
}, executors);
}
@Override
public void channelReadComplete(ChannelHandlerContext ctx) {
ctx.flush();
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
cause.printStackTrace();
}
});
}
});
b.bind(port).sync().channel().closeFuture().await();
} finally {
group.shutdownGracefully();
}
}
public void stop() {
group.shutdownGracefully();
}
}

OkHttp async interceptor or authenticator

Is there any way to do interception or authentication of a request asynchronously? For example, in a flow like this: get a password from the user, use it to obtain a token, and then retry the call that was intercepted?
I had the same problem here.
I use wait() and notify() in the getToken() method in my code; because I use OkHttp through the Retrofit library, it creates a new thread, which makes async operations possible here.
public synchronized void getToken() throws InterruptedException {
if (!isRefreshing()) {
// It is very important to call notify() on the same object on which we call wait()
final TokenProvider myInstance = this;
setRefreshing(true);
Log.d("refreshToken", "Refreshing token..." );
//Make async call
getRestClient().getAccountService().getRefreshedToken(mLoginData.getRefreshToken())
.subscribe(new Observer<LoginResponse>() {
@Override
public void onCompleted() {
synchronized (myInstance) {
setRefreshing(false);
Log.d("refreshToken", "notifyAll onComplete.");
myInstance.notifyAll();
}
}
@Override
public void onError(Throwable e) {
synchronized (myInstance) {
setRefreshing(false);
myInstance.notifyAll();
Log.d("refreshToken", "onError .");
}
}
@Override
public void onNext(LoginResponse loginResponse) {
synchronized (myInstance) {
mLoginData = loginResponse;
Log.d("refreshToken", "notifyAll onNext .");
myInstance.notifyAll();
}
}
});
}
Log.d("refreshToken", "before wait ." + android.os.Process.getThreadPriority(android.os.Process.myTid()) + this.toString());
this.wait();
Log.d("refreshToken", "after wait ." + android.os.Process.getThreadPriority(android.os.Process.myTid()) + this.toString());
}
You can try this and see if it works for you.

How to parse the POST request in an HttpComponents NIO server handler?

I am using the HttpComponents NIO server to handle a POST file-upload request.
Below is the sample code. I have the complete data in the data byte array, including parameters, the uploaded file, etc., separated by the multipart boundary. Is there a parser utility to parse data and get the parameters? Something like request.getParameter("param1"), request.getFile(), etc.
public static void main(String[] args) throws Exception {
int port = 8280;
// Create HTTP protocol processing chain
HttpProcessor httpproc = HttpProcessorBuilder.create()
.add(new ResponseDate())
.add(new ResponseServer("Test/1.1"))
.add(new ResponseContent())
.add(new ResponseConnControl()).build();
// Create request handler registry
UriHttpAsyncRequestHandlerMapper registry = new UriHttpAsyncRequestHandlerMapper();
// Register the handler for all URIs matching /test*
registry.register("/test*", new RequestHandler());
// Create server-side HTTP protocol handler
HttpAsyncService protocolHandler = new HttpAsyncService(httpproc, reqistry) {
@Override
public void connected(final NHttpServerConnection conn) {
System.out.println(conn + ": connection open");
super.connected(conn);
}
@Override
public void closed(final NHttpServerConnection conn) {
System.out.println(conn + ": connection closed");
super.closed(conn);
}
};
// Create HTTP connection factory
NHttpConnectionFactory<DefaultNHttpServerConnection> connFactory;
connFactory = new DefaultNHttpServerConnectionFactory(
ConnectionConfig.DEFAULT);
// Create server-side I/O event dispatch
IOEventDispatch ioEventDispatch = new DefaultHttpServerIODispatch(protocolHandler, connFactory);
// Set I/O reactor defaults
IOReactorConfig config = IOReactorConfig.custom()
.setIoThreadCount(1)
.setSoTimeout(3000)
.setConnectTimeout(3000)
.build();
// Create server-side I/O reactor
ListeningIOReactor ioReactor = new DefaultListeningIOReactor(config);
try {
// Listen on the given port
ioReactor.listen(new InetSocketAddress(port));
// Ready to go!
ioReactor.execute(ioEventDispatch);
} catch (InterruptedIOException ex) {
System.err.println("Interrupted");
} catch (IOException e) {
System.err.println("I/O error: " + e.getMessage());
}
System.out.println("Shutdown");
}
public static class RequestHandler implements HttpAsyncRequestHandler<HttpRequest> {
public void handleInternal(HttpRequest httpRequest, HttpResponse httpResponse, HttpContext httpContext) throws HttpException, IOException {
HttpEntity entity = null;
if (httpRequest instanceof HttpEntityEnclosingRequest)
entity = ((HttpEntityEnclosingRequest)httpRequest).getEntity();
byte[] data;
if (entity == null) {
data = new byte [0];
} else {
data = EntityUtils.toByteArray(entity);
}
System.out.println(new String(data));
httpResponse.setEntity(new StringEntity("success response"));
}
@Override public HttpAsyncRequestConsumer<HttpRequest> processRequest(HttpRequest request, HttpContext context) throws HttpException, IOException {
return new BasicAsyncRequestConsumer();
}
@Override
public void handle(HttpRequest request, HttpAsyncExchange httpExchange, HttpContext context) throws HttpException, IOException {
HttpResponse response = httpExchange.getResponse();
handleInternal(request, response, context);
httpExchange.submitResponse(new BasicAsyncResponseProducer(response));
}
}
MIME content parsing (as well as handling of content of any type) is out of scope for Apache HttpComponents. Please consider using Apache Mime4j.
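For illustration only (not part of the original answer), a rough sketch of handing the entity body to Mime4j's DOM API (mime4j-dom). Mime4j parses a complete MIME message, so the request's Content-Type header, which carries the multipart boundary, is prepended to the body bytes; class and method names are from the Mime4j DOM API as I recall them and should be checked against the version in use:
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.james.mime4j.dom.Entity;
import org.apache.james.mime4j.dom.Message;
import org.apache.james.mime4j.dom.Multipart;
import org.apache.james.mime4j.message.DefaultMessageBuilder;
// Hypothetical helper; call it from handleInternal(...) with the request and the data array.
private static void dumpParts(HttpRequest httpRequest, byte[] data) throws IOException {
    String contentType = httpRequest.getFirstHeader("Content-Type").getValue();
    ByteArrayOutputStream raw = new ByteArrayOutputStream();
    // Turn the bare entity body into a parseable MIME message by prepending the header.
    raw.write(("Content-Type: " + contentType + "\r\n\r\n").getBytes(StandardCharsets.US_ASCII));
    raw.write(data);
    Message message = new DefaultMessageBuilder().parseMessage(new ByteArrayInputStream(raw.toByteArray()));
    if (message.getBody() instanceof Multipart) {
        for (Entity part : ((Multipart) message.getBody()).getBodyParts()) {
            // File parts carry a filename; plain form fields only carry the
            // Content-Disposition "name" parameter.
            System.out.println(part.getDispositionType() + " / " + part.getFilename() + " (" + part.getMimeType() + ")");
        }
    }
}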
