Reactor Netty TcpServer with Pipeline - reactor-netty

The way I would have put together a Netty TCP server before Reactor Netty was to create the server bootstrap and add my custom pipeline class.
With Reactor Netty there is TcpServer.create(), but it seems that I have to supply a functional interface that takes a NettyInbound and a NettyOutbound and returns a Mono.
However, if I want to add a ChannelInitializer that builds my pipeline, I have to block to get the NettyContext.
The incoming message is received by the functional interface and I can send a response, but nothing goes through my pipeline.
Is there a way to make use of Reactor Netty and have messages flow through a customized pipeline?
Returning Mono.just("Hi") with neverComplete() successfully sends 'Hi' to the client when a connection is made and when a message is received, but I would rather offload the work to the pipeline and then feed the result back to the client.
public void startServer() throws InterruptedException{
EventLoopGroup group = new NioEventLoopGroup(1);
try {
final TcpServer server = TcpServer.create(opts -> opts
.eventLoopGroup(group)
.listen(tcpSocketAddress));
server
.newHandler((in, out) -> {
in.receive()
.take(1)
.log(ApolloApplicationTests.class.getName())
.subscribe(data -> {
log.info("Server Received: {}", data.toString(CharsetUtil.UTF_8));
latch.countDown();
});
return out.sendString(Mono.just("Hi")).neverComplete();
})
.block().addHandler(clientEndPoint)
.channel()
.closeFuture().sync();
} finally {
group.shutdownGracefully().sync();
}
}
import java.util.List;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Configurable;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.stereotype.Component;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
import io.netty.channel.ChannelHandler;
import io.netty.channel.ChannelHandlerAdapter;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelPipeline;
import io.netty.handler.codec.LengthFieldBasedFrameDecoder;
import io.netty.handler.codec.LengthFieldPrepender;
import io.netty.handler.codec.MessageToMessageDecoder;
import reactor.util.Logger;
import reactor.util.Loggers;
@Configurable
@Component
public class ClientEndPoint extends ChannelInitializer<Channel> {
final Logger log = Loggers.getLogger(ApolloApplication.class);
private ChannelPipeline pipeline;
@Autowired
private ChannelHandlerAdapter messageInterchange;
@Autowired
private LengthFieldBasedFrameDecoder lowOrderVliDecoder;
@Autowired
private MessageToMessageDecoder<ByteBuf> messageDecoder;
@Autowired
private LengthFieldPrepender vliEncoder;
@Autowired
@Qualifier("inBound")
List<ChannelHandler> inBoundHandlers;
@Autowired
@Qualifier("outBound")
List<ChannelHandler> outBoundHandlers;
@Override
protected void initChannel(Channel sc) throws Exception {
this.pipeline = sc.pipeline();
this.pipeline.addLast("lowOrderVliDecoder", this.lowOrderVliDecoder);
this.pipeline.addLast("messageDecoder", this.messageDecoder);
this.pipeline.addLast("vliEncoder", this.vliEncoder);
for (ChannelHandler handler : this.inBoundHandlers) {
this.pipeline.addLast(handler);
}
this.pipeline.addLast("messageInterchange", this.messageInterchange);
for (ChannelHandler handler : this.outBoundHandlers) {
this.pipeline.addLast(handler);
}
}
public void accept(Channel sc) {
this.pipeline = sc.pipeline();
this.pipeline.addLast("lowOrderVliDecoder", this.lowOrderVliDecoder);
this.pipeline.addLast("messageDecoder", this.messageDecoder);
this.pipeline.addLast("vliEncoder", this.vliEncoder);
for (ChannelHandler handler : this.inBoundHandlers) {
this.pipeline.addLast(handler);
}
this.pipeline.addLast("messageInterchange", this.messageInterchange);
for (ChannelHandler handler : this.outBoundHandlers) {
this.pipeline.addLast(handler);
}
}
}

So this is what I figured out:
public Mono<? extends NettyContext> initializeServer() throws InterruptedException {
this.log.debug("Server Initializing");
BiFunction<? super NettyInbound, ? super NettyOutbound, ? extends Publisher<Void>> serverHandler = (in,
out) -> {
in.receive().asString().subscribe(data -> {
this.log.debug("Received " + data + " on " + in);
});
return Flux.never();
};
TcpServer server = TcpServer.create(opts -> opts.afterChannelInit(pipeline).listen(tcpSocketAddress));
return server.newHandler(serverHandler);
}
where pipeline is the class that implements Consumer and builds the pipeline in its accept method, just like a typical Netty pipeline.
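For reference, a minimal sketch of such a Consumer written as a lambda (assuming reactor-netty 0.7.x, where the server options expose afterChannelInit(Consumer<? super Channel>); the handler names and frame-length settings below are illustrative only):
import java.util.function.Consumer;
import io.netty.channel.Channel;
import io.netty.channel.ChannelPipeline;
import io.netty.handler.codec.LengthFieldBasedFrameDecoder;
import io.netty.handler.codec.LengthFieldPrepender;
Consumer<Channel> pipeline = channel -> {
    ChannelPipeline p = channel.pipeline();
    // decoders first, then the encoder used for outgoing responses
    p.addLast("lowOrderVliDecoder", new LengthFieldBasedFrameDecoder(1024, 0, 2));
    p.addLast("vliEncoder", new LengthFieldPrepender(2));
    // add the remaining custom inbound/outbound handlers here
};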
Then I start the server
private void startServer(Mono<? extends NettyContext> connected) {
ChannelFuture f = connected.block(Duration.ofSeconds(5)).channel()
.closeFuture();
final CountDownLatch channelLatch = new CountDownLatch(1);
f.addListener(new ChannelFutureListener() {
@Override
public void operationComplete(ChannelFuture cf) throws Exception {
log.debug("Channel Disconnected");
}
});
f.awaitUninterruptibly();
// Now we are sure the future is completed.
assert f.isDone();
if (f.isCancelled()) {
this.log.warn("Connection Cancelled");
} else if (!f.isSuccess()) {
if (f.cause() != null) {
f.cause().printStackTrace();
} else {
this.log.warn("Connection not successful");
}
} else {
channelLatch.countDown();
this.log.info("Server Start Successful");
}
try {
channelLatch.await();
} catch (InterruptedException ex) {
throw new CancellationException("Interrupted while waiting for streaming " + "connection to arrive.");
}
}

Related

Require assistance with simple pure Java 11 WebSocket client example

There appear to be very few Java 11 (pure Java, non-framework-based) WebSocket client code examples on the web, so I'm hoping StackOverflow can come to the rescue for me once again.
This is the closest I've found, but unfortunately, to my (novice) eyes, it doesn't appear to be a complete solution showing how to consume the data from the WebSocket listener.
Looking at the WebSocket.Listener implementation, I presume the onText callback method would provide what I need, but I'm struggling to figure out how to return the CompletionStage object and get some sort of string data from the socket.
This is some test code I have so far.
Would appreciate assistance. Thanks.
public class Main {
public static void main(String[] args) {
WebSocketClient wsc = new WebSocketClient();
wsc.startSocket("ws://demos.kaazing.com/echo");
int i = 0;
// Bad, very bad
do {} while (i == 0);
}
}
public class WebSocketClient implements WebSocket.Listener {
@Override
public void onOpen(WebSocket webSocket) {
//...
System.out.println("Go...Open".concat(
webSocket.getSubprotocol()));
}
@Override
public CompletionStage<?> onText(WebSocket webSocket, CharSequence data, boolean last) {
//...
System.out.println(data.toString());
// How do I return the CompletionStage object
// return CompletionStage<String>
}
@Override
public void onError(WebSocket webSocket, Throwable error) {
//..
System.out.println("Bad day! ".concat(webSocket.toString()));
}
void startSocket(String connection) {
CompletableFuture<WebSocket> server_cf = HttpClient.
newHttpClient().
newWebSocketBuilder().
buildAsync(URI.create(connection),
new WebSocketClient());
WebSocket server = server_cf.join();
server.sendText("Hello!", true);
}
}
Below you find a working example. I have made some changes to your code above:
onOpen needs to invoke request(1) on the websocket (for example by invoking the default implementation) in order to receive further invocations.
moved method startSocket into the main method
replaced busy waiting with a countdown latch
declared class WebSocketClient as a (static) inner class
But beyond these (mostly cosmetic) changes, the program follows your idea: first a websocket connection is built, and after successful construction the text Hello! is sent to the echo server. This could also be done in method onOpen directly (see the sketch after the example below).
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.WebSocket;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.CountDownLatch;
public class Main {
public static void main(String[] args) throws Exception {
CountDownLatch latch = new CountDownLatch(1);
WebSocket ws = HttpClient
.newHttpClient()
.newWebSocketBuilder()
.buildAsync(URI.create("ws://demos.kaazing.com/echo"), new WebSocketClient(latch))
.join();
ws.sendText("Hello!", true);
latch.await();
}
private static class WebSocketClient implements WebSocket.Listener {
private final CountDownLatch latch;
public WebSocketClient(CountDownLatch latch) { this.latch = latch; }
@Override
public void onOpen(WebSocket webSocket) {
System.out.println("onOpen using subprotocol " + webSocket.getSubprotocol());
WebSocket.Listener.super.onOpen(webSocket);
}
@Override
public CompletionStage<?> onText(WebSocket webSocket, CharSequence data, boolean last) {
System.out.println("onText received " + data);
latch.countDown();
return WebSocket.Listener.super.onText(webSocket, data, last);
}
@Override
public void onError(WebSocket webSocket, Throwable error) {
System.out.println("Bad day! " + webSocket.toString());
WebSocket.Listener.super.onError(webSocket, error);
}
}
}
Btw, no subprotocol was negotiated, therefore the method webSocket.getSubprotocol() returns an empty string. The output in the console is
onOpen using subprotocol
onText received Hello!
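As mentioned above, the initial Hello! could also be sent from onOpen directly. A minimal sketch of that variant (same listener structure as the example above; sendText is part of java.net.http.WebSocket):
@Override
public void onOpen(WebSocket webSocket) {
    System.out.println("onOpen using subprotocol " + webSocket.getSubprotocol());
    // send the first message as soon as the connection is open
    webSocket.sendText("Hello!", true);
    // the default implementation requests the next incoming message
    WebSocket.Listener.super.onOpen(webSocket);
}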
The pattern for managing a WebSocket response returning a CompletionStage is:
@Override
public CompletionStage<?> onText(WebSocket webSocket, CharSequence data, boolean last) {
// return immediately; the response is generated lazily.
return CompletableFuture.supplyAsync(() -> {
String response = "Received ...";
// do slow task. Access to database or access to a server.
return response;
});
}
The following simpler implementation is only recommended when the response is generated quickly.
@Override
public CompletionStage<?> onText(WebSocket webSocket, CharSequence data, boolean last) {
// fast response.
String response = "The text has " + data.length() + " chars";
return CompletableFuture.completedFuture(response);
}
I have had some trouble getting various examples working. Specifically, I had trouble finding examples that actually showed how to open, send, and receive simple text messages. One important piece was having a server to which to connect. Here is what I managed to make work.
package webSockets;
import java.io.IOException;
import java.net.URI;
import javax.websocket.CloseReason;
import javax.websocket.ContainerProvider;
import javax.websocket.Endpoint;
import javax.websocket.EndpointConfig;
import javax.websocket.MessageHandler;
import javax.websocket.Session;
import javax.websocket.WebSocketContainer;
public class SimpleWebsocketClient extends Endpoint {
private Session session;
public SimpleWebsocketClient() {}
public SimpleWebsocketClient(URI endpointURI) {
try {
WebSocketContainer container = ContainerProvider.getWebSocketContainer();
container.connectToServer(this, endpointURI);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
@Override
public void onClose(Session session, CloseReason reason){
System.out.println("Disconnected as a result of "+ reason.getReasonPhrase());
}
@Override
public void onError(Session session, Throwable error){
System.out.println("Error communicating with server: " + error.getMessage());
}
@Override
public void onOpen(Session s, EndpointConfig config) {
System.out.println("Session opened");
session = s;
session.addMessageHandler(new MessageHandler.Whole<String>() {
@Override
public void onMessage(String msg) {
System.out.println("Text Message Received:" + msg);
}
});
try {
session.getBasicRemote().sendText("Hello there.");
session.getBasicRemote().sendText("Hope you are well!");
} catch (IOException ex) {
throw new RuntimeException(ex);
}
}
public static void main(String...arg) {
URI uri = URI.create("ws://connect.websocket.in/v3/1?api_key=oCdCMcMPQpbvNjUIzqtvF1d2X2okWpDQj4AwARJuAgtjhzKxVEjQU6IdCjwm&notify_self");
new SimpleWebsocketClient(uri);
while(true) {}
}
}

Message is not consumed by all consumers when a network of brokers is configured in ActiveMQ

I have two instances of my application on the same machine (although they could be on different machines as well), with two Tomcat instances on different ports, and Apache ActiveMQ embedded in the application.
I have configured a static network of brokers so that a message from one instance can be consumed by all other instances as well (each instance can be both producer and consumer).
Servlet:
package com.activemq.servlet;
import java.io.IOException;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.Map;
import javax.jms.JMSException;
import javax.servlet.ServletConfig;
import javax.servlet.ServletException;
import javax.servlet.annotation.WebServlet;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import com.activemq.ActiveMQStartup;
import com.activemq.MQPublisher;
import com.activemq.SendMsg;
import com.activemq.SendMsgToAllInstance;
import com.activemq.TestPublisher;
/**
* Servlet implementation class ActiveMQStartUpServlet
*/
@WebServlet(value = "/activeMQStartUpServlet", loadOnStartup = 1)
public class ActiveMQStartUpServlet extends HttpServlet {
private static final long serialVersionUID = 1L;
private ActiveMQStartup mqStartup = null;
private static final Map pooledPublishers = new HashMap();
@Override
public void init(ServletConfig config) throws ServletException {
System.out.println("starting servlet--------------");
super.init(config);
//Apache Active MQ Startup
mqStartup = new ActiveMQStartup();
mqStartup.startBrokerService();
}
@Override
protected void doPost(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
System.out.println(req.getParameter("distributedMsg"));
String mqConfig = null;
String distributedMsg = req.getParameter("distributedMsg");
String simpleMsg = req.getParameter("simpleMsg");
if (distributedMsg != null && !distributedMsg.equals(""))
mqConfig = "distributedMsg";
else if (simpleMsg != null && !simpleMsg.equals(""))
mqConfig = "simpleMsg";
MQPublisher publisher = acquirePublisher(mqConfig);
try {
publisher.publish(mqConfig);
} catch (JMSException e) {
// TODO Auto-generated catch block
e.printStackTrace();
} finally {
releasePublisher(publisher);
}
}
@SuppressWarnings("unchecked")
private void releasePublisher(MQPublisher publisher) {
if (publisher == null) return;
@SuppressWarnings("rawtypes")
LinkedList publishers;
TestPublisher poolablePublisher = (TestPublisher)publisher;
publishers = getPooledPublishers(poolablePublisher.getConfigurationName());
synchronized (publishers) {
publishers.addLast(poolablePublisher);
}
}
private MQPublisher acquirePublisher(String mqConfig) {
LinkedList publishers = getPooledPublishers(mqConfig);
MQPublisher publisher = getMQPublisher(publishers);
if (publisher != null) return publisher;
try {
if (mqConfig.equals("distributedMsg"))
return new TestPublisher(MQConfiguration.getConfiguration("distributedMsg"), new SendMsgToAllInstance());
else
return new TestPublisher(MQConfiguration.getConfiguration("simpleMsg"), new SendMsg());
}catch(Exception e){
e.printStackTrace();
}
return null;
}
private LinkedList getPooledPublishers(String mqConfig) {
LinkedList publishers = null;
publishers = (LinkedList) pooledPublishers.get(mqConfig);
if (publishers == null) {
synchronized(pooledPublishers) {
publishers = (LinkedList) pooledPublishers.get(mqConfig);
if (publishers == null) {
publishers = new LinkedList();
pooledPublishers.put(mqConfig, publishers);
}
}
}
return publishers;
}
private MQPublisher getMQPublisher(LinkedList publishers) {
synchronized (publishers) {
while (!publishers.isEmpty()) {
TestPublisher publisher = (TestPublisher)publishers.removeFirst();
return publisher;
}
}
return null;
}
}
Configuration:
package com.activemq.servlet;
import java.util.HashMap;
import java.util.Map;
import javax.jms.JMSException;
import javax.jms.Topic;
import javax.jms.TopicConnection;
import javax.jms.TopicConnectionFactory;
import javax.jms.TopicSession;
import org.apache.activemq.ActiveMQConnectionFactory;
import com.activemq.ActiveMQContext;
public class MQConfiguration {
private static final Map configurations = new HashMap();
private String mqConfig;
private String topicName;
private TopicConnection topicConnection = null;
private MQConfiguration(String mqConfig, String string, String string2) {
this.mqConfig = mqConfig;
try {
String topicFactoryConName = ActiveMQContext.getProperty(mqConfig);
this.topicName = (mqConfig.equals("distributedMsg") ? ActiveMQContext.getProperty("distributedTopic"):ActiveMQContext.getProperty("normalTopic"));
TopicConnectionFactory factory = (ActiveMQConnectionFactory) ActiveMQContext.getContext()
.lookup(topicFactoryConName);
this.topicConnection = factory.createTopicConnection();
this.topicConnection.start();
} catch (Exception e) {
System.out.println("error: " + e);
}
}
public static MQConfiguration getConfiguration(String mqConfig) {
if (mqConfig == null || "".equals(mqConfig)) {
throw new IllegalArgumentException("mqConfig is null or empty");
}
MQConfiguration config = null;
if (config != null) {
return config;
}
synchronized (configurations) {
config = (MQConfiguration) configurations.get(mqConfig);
if (config == null) {
config = new MQConfiguration(mqConfig, "userName", "userPassword");
}
configurations.put(mqConfig, config);
}
return config;
}
public String getMqConfig() {
return this.mqConfig;
}
public TopicSession createTopicSession(boolean isTransacted, int autoAcknowledge) throws JMSException {
if (this.topicConnection == null) {
IllegalStateException ise = new IllegalStateException("topic connection not configured");
throw ise;
}
return this.topicConnection.createTopicSession(isTransacted, autoAcknowledge);
}
public Topic getTopic() {
try {
return (Topic) ActiveMQContext.getContext().lookup(this.topicName);
} catch (Exception e) {
e.getMessage();
}
return null;
}
}
Publisher:
package com.activemq;
import javax.jms.JMSException;
import javax.jms.Message;
import javax.jms.MessageConsumer;
import javax.jms.MessageListener;
import javax.jms.Session;
import javax.jms.TextMessage;
import javax.jms.Topic;
import javax.jms.TopicPublisher;
import javax.jms.TopicSession;
import com.activemq.servlet.MQConfiguration;
public class TestPublisher implements MQPublisher {
private final String configurationName;
private TopicSession topicSession = null;
private TopicPublisher topicPublisher = null;
public TestPublisher(MQConfiguration config, Object messageListener) throws JMSException {
if (config == null) {
throw new IllegalArgumentException("config == null");
}
Topic topic = config.getTopic();
this.configurationName = config.getMqConfig();
this.topicSession = config.createTopicSession(false, Session.AUTO_ACKNOWLEDGE);
this.topicPublisher = this.topicSession.createPublisher(topic);
MessageConsumer msgConsumer = this.topicSession.createConsumer(topic);
msgConsumer.setMessageListener((MessageListener) messageListener);
}
@Override
public void publish(String msg) throws JMSException {
this.topicPublisher.publish(createMessage(msg, this.topicSession));
}
private Message createMessage(String msg, Session session) throws JMSException {
TextMessage message = session.createTextMessage(msg);
return message;
}
public String getConfigurationName() {
return this.configurationName;
}
}
Consumer:
package com.activemq;
import javax.jms.Message;
import javax.jms.MessageListener;
public class SendMsgToAllInstance implements MessageListener {
@Override
public void onMessage(Message arg0) {
System.out.println("distributed message-------------");
// We have call to dao layer to to fetch some data and cached it
}
}
JNDI: activemq-jndi.properties
# JNDI properties file to setup the JNDI server within ActiveMQ
#
# Default JNDI properties settings
#
java.naming.factory.initial=org.apache.activemq.jndi.ActiveMQInitialContextFactory
java.naming.provider.url=tcp://localhost:61616
activemq.network.connector=static:(tcp://localhost:61620)
#activemq.network.connector=broker:(tcp://localhost:61619,network:static:tcp://localhost:61620)?persistent=false&useJmx=true
activemq.data.directory=data61619
activemq.jmx.port=1099
#
# Set the connection factory name(s) as well as the destination names. The connection factory name(s)
# as well as the second part (after the dot) of the left hand side of the destination definition
# must be used in the JNDI lookups.
#
connectionFactoryNames = distributedMsgFactory,simpleMsgFactory
topic.jms/distributedTopic=distributedTopic
topic.jms/normalTopic=normalTopic
distributedMsg=distributedMsgFactory
simpleMsg=simpleMsgFactory
distributedTopic=jms/distributedTopic
normalTopic=jms/normalTopic
ActiveMQStartup:
package com.activemq;
import java.net.URI;
import org.apache.activemq.broker.BrokerPlugin;
import org.apache.activemq.broker.BrokerService;
import org.apache.activemq.broker.TransportConnector;
import org.apache.activemq.broker.jmx.ManagementContext;
import org.apache.activemq.network.NetworkConnector;
import org.apache.activemq.security.JaasAuthenticationPlugin;
public class ActiveMQStartup {
private final String bindAddress;
private final String dataDirectory;
private BrokerService broker = new BrokerService();
protected final int numRestarts = 3;
protected final int networkTTL = 2;
protected final int consumerTTL = 2;
protected final boolean dynamicOnly = true;
protected final String networkBroker;
protected final String jmxPort;
public ActiveMQStartup() {
ActiveMQContext context = new ActiveMQContext();
context.loadJndiProperties();
bindAddress = ActiveMQContext.getProperty("java.naming.provider.url");
dataDirectory = ActiveMQContext.getProperty("activemq.data.directory");
networkBroker = ActiveMQContext.getProperty("activemq.network.connector");
jmxPort = ActiveMQContext.getProperty("activemq.jmx.port");
}
// Start activemq broker service
public void startBrokerService() {
try {
broker.setDataDirectory("../" + dataDirectory);
broker.setBrokerName(dataDirectory);
broker.setUseShutdownHook(true);
TransportConnector connector = new TransportConnector();
connector.setUri(new URI(bindAddress));
//broker.setPlugins(new BrokerPlugin[]{new JaasAuthenticationPlugin()});
ManagementContext mgContext = new ManagementContext();
if (networkBroker != null && !networkBroker.isEmpty()) {
NetworkConnector networkConnector = broker.addNetworkConnector(networkBroker);
networkConnector.setName(dataDirectory);
mgContext.setConnectorPort(Integer.parseInt(jmxPort));
broker.setManagementContext(mgContext);
configureNetworkConnector(networkConnector);
}
broker.setNetworkConnectorStartAsync(true);
broker.addConnector(connector);
broker.start();
} catch (Exception e) {
System.out.println("Failed to start Apache MQ Broker : " + e);
}
}
private void configureNetworkConnector(NetworkConnector networkConnector) {
networkConnector.setDuplex(true);
networkConnector.setNetworkTTL(networkTTL);
networkConnector.setDynamicOnly(dynamicOnly);
networkConnector.setConsumerTTL(consumerTTL);
//networkConnector.setStaticBridge(true);
}
// Stop broker service
public void stopBrokerService() {
try {
broker.stop();
} catch (Exception e) {
System.out.println("Unable to stop the ApacheMQ Broker service " + e);
}
}
}
I am starting the Tomcat instances one by one and can see that the network connection between the brokers is getting established.
When I send a message from instance1 or instance2 the first time, it is consumed on that instance only, but when I send a message from the second instance it is consumed by both.
Code in git: https://github.com/AratRana/ApacheActiveMQ
Could you point out where I am wrong?
Finally, I was able to do it. When I started the consumer during server startup, I could see the message consumed in all instances. So to achieve this, the consumers need to be started before publishing any message.
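A minimal sketch of that idea, reusing the servlet from the question (this is an assumption about where to place the startup code; acquirePublisher/releasePublisher are the existing pooling methods, and creating a TestPublisher also registers its MessageListener, so the topic consumer on this instance is subscribed before any message is published):
@Override
public void init(ServletConfig config) throws ServletException {
    super.init(config);
    // Apache ActiveMQ startup
    mqStartup = new ActiveMQStartup();
    mqStartup.startBrokerService();
    // Eagerly create (and pool) the publishers so their topic consumers
    // are registered on this instance before the first message arrives.
    releasePublisher(acquirePublisher("distributedMsg"));
    releasePublisher(acquirePublisher("simpleMsg"));
}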

Apache flume custom interceptor - HDFS file in binary and strange

I am relatively new to the Flume interceptor concept and am facing an issue: before applying the interceptor, the file written by the sink is a normal text file, but after applying the interceptor the output turns into something unreadable.
My interceptor code is below:
package com.flume;
import org.apache.flume.*;
import org.apache.flume.interceptor.*;
import java.util.List;
import java.util.Map;
import java.util.ArrayList;
import java.io.UnsupportedEncodingException;
import java.net.InetAddress;
import java.net.UnknownHostException;
public class CustomHostInterceptor implements Interceptor {
private String hostValue;
private String hostHeader;
public CustomHostInterceptor(String hostHeader){
this.hostHeader = hostHeader;
}
@Override
public void initialize() {
// At interceptor start up
try {
hostValue =
InetAddress.getLocalHost().getHostName();
} catch (UnknownHostException e) {
throw new FlumeException("Cannot get Hostname", e);
}
}
@Override
public Event intercept(Event event) {
// This is the event's body
String body = new String(event.getBody());
if(body.toLowerCase().contains("text")){
try {
event.setBody("hadoop".getBytes("UTF-8"));
} catch (UnsupportedEncodingException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
// These are the event's headers
Map<String, String> headers = event.getHeaders();
// Enrich header with hostname
headers.put(hostHeader, hostValue);
// Let the enriched event go
return event;
}
@Override
public List<Event> intercept(List<Event> events) {
List<Event> interceptedEvents =
new ArrayList<Event>(events.size());
for (Event event : events) {
// Intercept any event
Event interceptedEvent = intercept(event);
interceptedEvents.add(interceptedEvent);
}
return interceptedEvents;
}
@Override
public void close() {
// At interceptor shutdown
}
public static class Builder
implements Interceptor.Builder {
private String hostHeader;
@Override
public void configure(Context context) {
// Retrieve property from flume conf
hostHeader = context.getString("hostHeader");
}
@Override
public Interceptor build() {
return new CustomHostInterceptor(hostHeader);
}
}
}
Flume conf is:
agent.sources=exec-source
agent.sinks=hdfs-sink
agent.channels=ch1
agent.sources.exec-source.type=exec
agent.sources.exec-source.command=tail -F /home/cloudera/Desktop/app.log
agent.sources.exec-source.interceptors = i1
agent.sources.exec-source.interceptors.i1.type = com.flume.CustomHostInterceptor$Builder
agent.sources.exec-source.interceptors.i1.hostHeader = hostname
agent.sinks.hdfs-sink.type=hdfs
agent.sinks.hdfs-sink.hdfs.path= hdfs://localhost:8020/bosch/flume/applogs
agent.sinks.hdfs-sink.hdfs.filePrefix=logs
agent.sinks.hdfs-sink.hdfs.rollInterval=60
agent.sinks.hdfs-sink.hdfs.rollSize=0
agent.channels.ch1.type=memory
agent.channels.ch1.capacity=1000
agent.sources.exec-source.channels=ch1
agent.sinks.hdfs-sink.channel=ch1
On doing a cat on the file created in HDFS:
SEQ!org.apache.hadoop.io.LongWritable"org.apache.hadoop.io.BytesWritable���*q�CJv�/ESmP�ź
some textP�żc
some more textP���K
textP��ߌangels and deamonsP��%�
text bla blaP��1�angels and deamonsP��1�
testP��1�hmmmP��1�anything
Any suggestions?
Thanks
Looks like there is nothing wrong with the interceptor.
The problem is in your Flume agent config: you are not specifying the hdfs.fileType property, so it defaults to SequenceFile.
Try adding this line to your HDFS sink and let me know if this works.
agent.sinks.hdfs-sink.hdfs.fileType=DataStream
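For context, here is a sketch of the sink section with that property added (same property names as the original config; DataStream writes the event body as plain text, whereas the default SequenceFile produces the binary SEQ... output shown above):
agent.sinks.hdfs-sink.type=hdfs
agent.sinks.hdfs-sink.hdfs.path=hdfs://localhost:8020/bosch/flume/applogs
agent.sinks.hdfs-sink.hdfs.filePrefix=logs
agent.sinks.hdfs-sink.hdfs.fileType=DataStream
agent.sinks.hdfs-sink.hdfs.rollInterval=60
agent.sinks.hdfs-sink.hdfs.rollSize=0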

Jetty Websocket Compilation Errors

I am trying to do a Jetty WebSocket example.
I copied an example from the internet, which was working fine when I deployed it directly to the server without making any changes.
But when I copied the source (the servlet) into the Eclipse IDE, it gave compilation errors:
The method onClose(int, String) of type Html5Servlet.StockTickerSocket must override a superclass method
The method onOpen(WebSocket.Connection) of type Html5Servlet.StockTickerSocket must override a superclass method
The method onMessage(String) of type Html5Servlet.StockTickerSocket must override a superclass method
This is my servlet; I kept the jars as mentioned in that example.
package org.ajeesh.app;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.atomic.AtomicInteger;
import javax.servlet.http.HttpServletRequest;
import org.eclipse.jetty.websocket.WebSocket;
import org.eclipse.jetty.websocket.WebSocketServlet;
public class Html5Servlet extends WebSocketServlet {
private AtomicInteger index = new AtomicInteger();
private static final List<String> tickers = new ArrayList<String>();
static{
tickers.add("ajeesh");
tickers.add("peeyu");
tickers.add("kidillan");
tickers.add("entammo");
}
/**
*
*/
private static final long serialVersionUID = 1L;
public WebSocket doWebSocketConnect(HttpServletRequest req, String resp) {
System.out.println("On server");
return new StockTickerSocket();
}
protected String getMyJsonTicker(){
StringBuilder start=new StringBuilder("{");
start.append("\"stocks\":[");
int counter=0;
for (String aTicker : tickers) {
counter++;
start.append("{ \"ticker\":\""+aTicker +"\""+","+"\"price\":\""+index.incrementAndGet()+"\" }");
if(counter<tickers.size()){
start.append(",");
}
}
start.append("]");
start.append("}");
return start.toString();
}
public class StockTickerSocket implements WebSocket.OnTextMessage{
private Connection connection;
private Timer timer;
@Override
public void onClose(int arg0, String arg1) {
System.out.println("Web socket closed!");
}
@Override
public void onOpen(Connection connection) {
System.out.println("onOpen!");
this.connection=connection;
this.timer=new Timer();
}
@Override
public void onMessage(String data) {
System.out.println("onMessage!");
if(data.indexOf("disconnect")>=0){
connection.close();
timer.cancel();
}else{
sendMessage();
}
}
private void sendMessage() {
System.out.println("sendMessage!");
if(connection==null||!connection.isOpen()){
System.out.println("Connection is closed!!");
return;
}
timer.schedule(new TimerTask() {
@Override
public void run() {
try{
System.out.println("Running task");
connection.sendMessage(getMyJsonTicker());
}
catch (IOException e) {
e.printStackTrace();
}
}
}, new Date(),5000);
}
}
}

Jetty 9 WebSocketListener, beforeConnect

Can WebSocketListener provide a beforeWebSocketConnect method?
I would like to check if request parameters are correct before opening the socket and deny the connection if they are not.
However, is there a way to cancel the protocol switch?
Starting with Jetty 9, you can use the WebSocketCreator concept to achieve this behavior.
package org.eclipse.jetty.websocket.server.examples;
import java.io.IOException;
import org.eclipse.jetty.websocket.api.UpgradeRequest;
import org.eclipse.jetty.websocket.api.UpgradeResponse;
import org.eclipse.jetty.websocket.server.examples.echo.BigEchoSocket;
import org.eclipse.jetty.websocket.servlet.WebSocketCreator;
import org.eclipse.jetty.websocket.servlet.WebSocketServlet;
import org.eclipse.jetty.websocket.servlet.WebSocketServletFactory;
@SuppressWarnings("serial")
public class MyCustomCreationServlet extends WebSocketServlet
{
public static class MyCustomCreator implements WebSocketCreator
{
@Override
public Object createWebSocket(UpgradeRequest req, UpgradeResponse resp)
{
String query = req.getQueryString();
// Start looking at the UpgradeRequest to determine what you want to do
if ((query == null) || (query.length() <= 0))
{
try
{
// Let UPGRADE request for websocket fail with
// status code 403 (FORBIDDEN) [per RFC-6455]
resp.sendForbidden("Unspecified query");
}
catch (IOException e)
{
// An input or output exception occurs
e.printStackTrace();
}
// No UPGRADE
return null;
}
// Create the websocket we want to
if (query.contains("bigecho"))
{
return new BigEchoSocket();
}
else if (query.contains("echo"))
{
return new MyEchoSocket();
}
// Let UPGRADE fail with 503 (UNAVAILABLE)
return null;
}
}
@Override
public void configure(WebSocketServletFactory factory)
{
factory.setCreator(new MyCustomCreator());
}
}
