Read Application Object from GemFire using Spring Data GemFire. Data stored using Spring XD's gemfire-json-server

I'm using the gemfire-json-server module in Spring XD to populate a GemFire grid with JSON representations of “Order” objects. I understand the gemfire-json-server module saves data in PDX form in GemFire. I’d like to read the contents of the GemFire grid into an “Order” object in my application, but I get a ClassCastException that reads:
java.lang.ClassCastException: com.gemstone.gemfire.pdx.internal.PdxInstanceImpl cannot be cast to org.apache.geode.demo.cc.model.Order
I’m using the Spring Data GemFire libraries to read contents of the cluster. The code snippet to read the contents of the Grid follows:
public interface OrderRepository extends GemfireRepository<Order, String>{
Order findByTransactionId(String transactionId);
}
How can I use Spring Data GemFire to convert data read from the GemFire cluster into an Order object?
Note: The data was initially stored in GemFire using Spring XD's gemfire-json-server module.

Still waiting to hear back from the GemFire PDX engineering team, specifically on Region.get(key), but, interestingly enough, if you annotate your application domain object with...
@JsonTypeInfo(use = JsonTypeInfo.Id.CLASS, include = JsonTypeInfo.As.PROPERTY, property = "@type")
public class Order ... {
...
}
This works!
Under the hood, I knew the GemFire JSONFormatter class (see here) used Jackson's API to un/marshal (de/serialize) JSON data to and from PDX.
However, the orderRepository.findOne(ID) and ordersRegion.get(key) still do not function as I would expect. See updated test class below for more details.
Will report back again when I have more information.
@RunWith(SpringJUnit4ClassRunner.class)
@ContextConfiguration(classes = GemFireConfiguration.class)
@SuppressWarnings("unused")
public class JsonToPdxToObjectDataAccessIntegrationTest {
protected static final AtomicLong ID_SEQUENCE = new AtomicLong(0L);
private Order amazon;
private Order bestBuy;
private Order target;
private Order walmart;
@Autowired
private OrderRepository orderRepository;
@Resource(name = "Orders")
private com.gemstone.gemfire.cache.Region<Long, Object> orders;
protected Order createOrder(String name) {
return createOrder(ID_SEQUENCE.incrementAndGet(), name);
}
protected Order createOrder(Long id, String name) {
return new Order(id, name);
}
protected <T> T fromPdx(Object pdxInstance, Class<T> toType) {
try {
if (pdxInstance == null) {
return null;
}
else if (toType.isInstance(pdxInstance)) {
return toType.cast(pdxInstance);
}
else if (pdxInstance instanceof PdxInstance) {
return new ObjectMapper().readValue(JSONFormatter.toJSON(((PdxInstance) pdxInstance)), toType);
}
else {
throw new IllegalArgumentException(String.format("Expected object of type PdxInstance; but was (%1$s)",
pdxInstance.getClass().getName()));
}
}
catch (IOException e) {
throw new RuntimeException(String.format("Failed to convert PDX to object of type (%1$s)", toType), e);
}
}
protected void log(Object value) {
System.out.printf("Object of Type (%1$s) has Value (%2$s)", ObjectUtils.nullSafeClassName(value), value);
}
protected Order put(Order order) {
Object existingOrder = orders.putIfAbsent(order.getTransactionId(), toPdx(order));
return (existingOrder != null ? fromPdx(existingOrder, Order.class) : order);
}
protected PdxInstance toPdx(Object obj) {
try {
return JSONFormatter.fromJSON(new ObjectMapper().writeValueAsString(obj));
}
catch (JsonProcessingException e) {
throw new RuntimeException(String.format("Failed to convert object (%1$s) to JSON", obj), e);
}
}
@Before
public void setup() {
amazon = put(createOrder("Amazon Order"));
bestBuy = put(createOrder("BestBuy Order"));
target = put(createOrder("Target Order"));
walmart = put(createOrder("Wal-Mart Order"));
}
@Test
public void regionGet() {
assertThat((Order) orders.get(amazon.getTransactionId()), is(equalTo(amazon)));
}
@Test
public void repositoryFindOneMethod() {
log(orderRepository.findOne(target.getTransactionId()));
assertThat(orderRepository.findOne(target.getTransactionId()), is(equalTo(target)));
}
@Test
public void repositoryQueryMethod() {
assertThat(orderRepository.findByTransactionId(amazon.getTransactionId()), is(equalTo(amazon)));
assertThat(orderRepository.findByTransactionId(bestBuy.getTransactionId()), is(equalTo(bestBuy)));
assertThat(orderRepository.findByTransactionId(target.getTransactionId()), is(equalTo(target)));
assertThat(orderRepository.findByTransactionId(walmart.getTransactionId()), is(equalTo(walmart)));
}
#Region("Orders")
#JsonTypeInfo(use = JsonTypeInfo.Id.CLASS, include = JsonTypeInfo.As.PROPERTY, property = "#type")
public static class Order implements PdxSerializable {
protected static final OrderPdxSerializer pdxSerializer = new OrderPdxSerializer();
@Id
private Long transactionId;
private String name;
public Order() {
}
public Order(Long transactionId) {
this.transactionId = transactionId;
}
public Order(Long transactionId, String name) {
this.transactionId = transactionId;
this.name = name;
}
public String getName() {
return name;
}
public void setName(final String name) {
this.name = name;
}
public Long getTransactionId() {
return transactionId;
}
public void setTransactionId(final Long transactionId) {
this.transactionId = transactionId;
}
@Override
public void fromData(PdxReader reader) {
Order order = (Order) pdxSerializer.fromData(Order.class, reader);
if (order != null) {
this.transactionId = order.getTransactionId();
this.name = order.getName();
}
}
@Override
public void toData(PdxWriter writer) {
pdxSerializer.toData(this, writer);
}
@Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof Order)) {
return false;
}
Order that = (Order) obj;
return ObjectUtils.nullSafeEquals(this.getTransactionId(), that.getTransactionId());
}
@Override
public int hashCode() {
int hashValue = 17;
hashValue = 37 * hashValue + ObjectUtils.nullSafeHashCode(getTransactionId());
return hashValue;
}
@Override
public String toString() {
return String.format("{ @type = %1$s, id = %2$d, name = %3$s }",
getClass().getName(), getTransactionId(), getName());
}
}
public static class OrderPdxSerializer implements PdxSerializer {
@Override
public Object fromData(Class<?> type, PdxReader in) {
if (Order.class.equals(type)) {
return new Order(in.readLong("transactionId"), in.readString("name"));
}
return null;
}
@Override
public boolean toData(Object obj, PdxWriter out) {
if (obj instanceof Order) {
Order order = (Order) obj;
out.writeLong("transactionId", order.getTransactionId());
out.writeString("name", order.getName());
return true;
}
return false;
}
}
public interface OrderRepository extends GemfireRepository<Order, Long> {
Order findByTransactionId(Long transactionId);
}
@Configuration
protected static class GemFireConfiguration {
@Bean
public Properties gemfireProperties() {
Properties gemfireProperties = new Properties();
gemfireProperties.setProperty("name", JsonToPdxToObjectDataAccessIntegrationTest.class.getSimpleName());
gemfireProperties.setProperty("mcast-port", "0");
gemfireProperties.setProperty("log-level", "warning");
return gemfireProperties;
}
@Bean
public CacheFactoryBean gemfireCache(Properties gemfireProperties) {
CacheFactoryBean cacheFactoryBean = new CacheFactoryBean();
cacheFactoryBean.setProperties(gemfireProperties);
//cacheFactoryBean.setPdxSerializer(new MappingPdxSerializer());
cacheFactoryBean.setPdxSerializer(new OrderPdxSerializer());
cacheFactoryBean.setPdxReadSerialized(false);
return cacheFactoryBean;
}
@Bean(name = "Orders")
public PartitionedRegionFactoryBean ordersRegion(Cache gemfireCache) {
PartitionedRegionFactoryBean regionFactoryBean = new PartitionedRegionFactoryBean();
regionFactoryBean.setCache(gemfireCache);
regionFactoryBean.setName("Orders");
regionFactoryBean.setPersistent(false);
return regionFactoryBean;
}
@Bean
public GemfireRepositoryFactoryBean orderRepository() {
GemfireRepositoryFactoryBean<OrderRepository, Order, Long> repositoryFactoryBean =
new GemfireRepositoryFactoryBean<>();
repositoryFactoryBean.setRepositoryInterface(OrderRepository.class);
return repositoryFactoryBean;
}
}
}

So, as you are aware, GemFire (and by extension, Apache Geode) stores JSON in PDX format (as a PdxInstance). This is so GemFire can interoperate with many different language-based clients (native C++/C#, web-oriented clients (JavaScript, Python, Ruby, etc.) using the Developer REST API, in addition to Java) and also so OQL can be used to query the JSON data.
After a bit of experimentation, I am surprised GemFire is not behaving as I would expect. I created an example, self-contained test class (i.e. no Spring XD, of course) that simulates your use case... essentially storing JSON data in GemFire as PDX and then attempting to read the data back out as the Order application domain object type using the Repository abstraction, logical enough.
Given the use of the Repository abstraction and implementation from Spring Data GemFire, the infrastructure will attempt to access the application domain object based on the Repository generic type parameter (in this case "Order" from the "OrderRepository" definition).
However, the data is stored in PDX, so now what?
No matter, Spring Data GemFire provides the MappingPdxSerializer class to convert PDX instances back to application domain objects using the same "mapping meta-data" that the Repository infrastructure uses. Cool, so I plug that in...
@Bean
public CacheFactoryBean gemfireCache(Properties gemfireProperties) {
CacheFactoryBean cacheFactoryBean = new CacheFactoryBean();
cacheFactoryBean.setProperties(gemfireProperties);
cacheFactoryBean.setPdxSerializer(new MappingPdxSerializer());
cacheFactoryBean.setPdxReadSerialized(false);
return cacheFactoryBean;
}
You will also notice, I set the PDX 'read-serialized' property (cacheFactoryBean.setPdxReadSerialized(false);) to false in order to ensure data access operations return the domain object and not the PDX instance.
However, this had no effect on the query method. In fact, it had no effect on the following operations either...
orderRepository.findOne(amazonOrder.getTransactionId());
ordersRegion.get(amazonOrder.getTransactionId());
Both calls returned a PdxInstance. Note, the implementation of OrderRepository.findOne(..) is based on SimpleGemfireRepository.findOne(key), which uses GemfireTemplate.get(key), which just performs Region.get(key), and so is effectively the same as ordersRegion.get(amazonOrder.getTransactionId()). The outcome should not be a PdxInstance, especially with Region.get() and read-serialized set to false.
With the OQL query (SELECT * FROM /Orders WHERE transactionId = $1) generated from the findByTransactionId(String id), the Repository infrastructure has a bit less control over what the GemFire query engine will return based on what the caller (OrderRepository) expects (based on the generic type parameter), so running OQL statements could potentially behave differently than direct Region access using get.
Next, I went on to try modifying the Order type to implement PdxSerializable, to handle the conversion during data access operations (direct Region access with get, OQL, or otherwise). This had no effect.
So, I tried to implement a custom PdxSerializer for Order objects. This had no effect either.
The only thing I can conclude at this point is that something is getting lost in translation between Order -> JSON -> PDX and then from PDX -> Order. Seemingly, GemFire needs additional type meta-data required by PDX (something like @JsonTypeInfo(use = JsonTypeInfo.Id.CLASS, include = JsonTypeInfo.As.PROPERTY, property = "@type")) in the JSON data that the JSONFormatter recognizes, though I am not certain it does.
Note, in my test class, I used Jackson's ObjectMapper to serialize the Order to JSON and then GemFire's JSONFormatter to serialize the JSON to PDX, which I suspect Spring XD is doing similarly under-the-hood. In fact, Spring XD uses Spring Data GemFire and is most likely using the JSON Region Auto Proxy support. That is exactly what SDG's JSONRegionAdvice object does (see here).
Anyway, I have an inquiry out to the rest of the GemFire engineering team. There are also things that could be done in Spring Data GemFire to ensure the PDX data is converted, such as making use of the MappingPdxSerializer directly to convert the data automatically on behalf of the caller if the data is indeed of type PdxInstance. Similar to how JSON Region Auto Proxying works, you could write an AOP interceptor for the Orders Region to automagically convert PDX to an Order.
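For illustration only, here is a minimal sketch of such an interceptor (my own, not from SDG), assuming Spring AOP can proxy the Spring-managed "Orders" Region bean; it reuses the same Jackson/JSONFormatter round-trip as the fromPdx(..) helper in the test class above:
import org.aspectj.lang.ProceedingJoinPoint;
import org.aspectj.lang.annotation.Around;
import org.aspectj.lang.annotation.Aspect;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.gemstone.gemfire.pdx.JSONFormatter;
import com.gemstone.gemfire.pdx.PdxInstance;
@Aspect
public class PdxToOrderInterceptor {
@Around("execution(* com.gemstone.gemfire.cache.Region.get(..)) && bean(Orders)")
public Object convertPdx(ProceedingJoinPoint joinPoint) throws Throwable {
Object result = joinPoint.proceed();
if (result instanceof PdxInstance) {
// PDX -> JSON -> domain type, the same round-trip as fromPdx(..)
return new ObjectMapper().readValue(JSONFormatter.toJSON((PdxInstance) result), Order.class);
}
return result;
}
}
Note this only applies when the Region is accessed through the Spring proxy; direct cache access would bypass the advice.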
Though, I don't think any of this should be necessary as GemFire should be doing the right thing in this case. Sorry I don't have a better answer right now. Let's see what I find out.
Cheers and stay tuned!
See subsequent post for test code.

Related

Cache Kafka Records using Caffeine Cache in Spring Boot

I am trying to cache Kafka records for a 3-minute interval, after which they should expire and be removed from the cache.
Each incoming record, fetched by a Kafka consumer written in Spring Boot, needs to be put into the cache first; then, if the next record matches a cached record, that duplicate needs to be discarded.
I have tried using Caffeine cache as below,
@EnableCaching
public class AppCacheManagerConfig {
@Bean
public CacheManager cacheManager(Ticker ticker) {
CaffeineCache bookCache = buildCache("declineRecords", ticker, 3);
SimpleCacheManager cacheManager = new SimpleCacheManager();
cacheManager.setCaches(Collections.singletonList(bookCache));
return cacheManager;
}
private CaffeineCache buildCache(String name, Ticker ticker, int minutesToExpire) {
return new CaffeineCache(name, Caffeine.newBuilder().expireAfterWrite(minutesToExpire, TimeUnit.MINUTES)
.maximumSize(100).ticker(ticker).build());
}
@Bean
public Ticker ticker() {
return Ticker.systemTicker();
}
}
and my Kafka Consumer is as below,
@Autowired
CachingServiceImpl cachingService;
@KafkaListener(topics = "#{'${spring.kafka.consumer.topic}'}", concurrency = "#{'${spring.kafka.consumer.concurrentConsumers}'}", errorHandler = "#{'${spring.kafka.consumer.errorHandler}'}")
public void consume(Message<?> message, Acknowledgment acknowledgment,
@Header(KafkaHeaders.RECEIVED_TIMESTAMP) long createTime) {
logger.info("Recieved Message: " + message.getPayload());
try {
boolean approveTopic = false;
boolean duplicateRecord = false;
if (cachingService.isDuplicateCheck(declineRecord)) {
//do something with records
}
else
{
//do something with records
}
cachingService.putInCache(xmlJSONObj, declineRecord, time);
and my caching service is as below,
@Component
public class CachingServiceImpl {
private static final Logger logger = LoggerFactory.getLogger(CachingServiceImpl.class);
@Autowired
CacheManager cacheManager;
@Cacheable(value = "declineRecords", key = "#declineRecord", sync = true)
public String putInCache(JSONObject xmlJSONObj, String declineRecord, String time) {
logger.info("Record is Cached for 3 minutes interval check", declineRecord);
cacheManager.getCache("declineRecords").put(declineRecord, time);
return declineRecord;
}
public boolean isDuplicateCheck(String declineRecord) {
if (null != cacheManager.getCache("declineRecords").get(declineRecord)) {
return true;
}
return false;
}
}
But each time a record arrives at the consumer, my cache is empty. It's not holding the records.
Modifications done:
After going through the suggestions and some more R&D, I added the configuration file below and removed some of the earlier logic. Caching now works as expected, but the duplicate check fails when all three consumers receive the same records.
@Configuration
public class AppCacheManagerConfig {
public static Cache<String, Object> jsonCache =
Caffeine.newBuilder().expireAfterWrite(3, TimeUnit.MINUTES)
.maximumSize(10000).recordStats().build();
@Bean
public CacheLoader<Object, Object> cacheLoader() {
CacheLoader<Object, Object> cacheLoader = new CacheLoader<Object, Object>() {
@Override
public Object load(Object key) throws Exception {
return null;
}
@Override
public Object reload(Object key, Object oldValue) throws Exception {
return oldValue;
}
};
return cacheLoader;
}
Now I am using the above cache with manual put and get.
I guess you're trying to implement record deduplication for Kafka.
Here is the similar discussion:
https://github.com/spring-projects/spring-kafka/issues/80
Here is the current abstract class which you may extend to achieve the necessary result:
https://github.com/spring-projects/spring-kafka/blob/master/spring-kafka/src/main/java/org/springframework/kafka/listener/adapter/AbstractFilteringMessageListener.java
Your caching service is definitely incorrect: the @Cacheable annotation is meant to mark data getters and setters so that caching is added through AOP, while in your code you clearly implement some low-level cache-updating logic of your own.
At least the following changes may help you:
Remove @Cacheable. You don't need it because you work with the cache manually (see the sketch after this list), so it may be a source of conflicts (especially since you use sync = true). If it helps, remove @EnableCaching as well - it enables support for cache-related Spring annotations, which you don't need here.
Try removing the Ticker bean (adjusting the parameters of the other beans accordingly). It should not be harmful in your configuration, but it is usually helpful only for tests; there is no need to define it otherwise.
Double-check what declineRecord is. If it's a serialized object, ensure that serialization works properly.
Add recordStats() to the cache and write stats() to the log for further analysis.
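Also, on the multi-consumer failure you mention: a separate isDuplicateCheck(..) followed by putInCache(..) is two non-atomic steps, so concurrent consumers can all observe "not cached" before any of them writes. Here is a minimal sketch of the manual approach collapsed into one atomic operation (assuming a plain Caffeine cache and your declineRecord/time values):
import java.util.concurrent.TimeUnit;
import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;
public class DeduplicationCache {
private final Cache<String, String> cache = Caffeine.newBuilder()
.expireAfterWrite(3, TimeUnit.MINUTES)
.maximumSize(10000)
.recordStats()
.build();
// Returns true if the record was already cached (a duplicate).
// putIfAbsent is atomic: only the first writer of a key gets null back,
// so consumers racing on the same record agree on a single winner.
public boolean isDuplicate(String declineRecord, String time) {
return cache.asMap().putIfAbsent(declineRecord, time) != null;
}
}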

Spring Boot Gemfire Server Configuration

I am trying to understand how to host a Spring Boot Gemfire server process.
I found this example Spring Gemfire Server
The problem I am having is that the server I am trying to add to the cluster does not show up in the cluster after I start the process.
Here are the steps I am taking:
Start a new locator locally (default port): gfsh>start locator --name=loc-one
I want to add this SpringBootGemfireServer to the cluster:
Note: I have commented out the embedded locator start-up - I want to add this server to the existing locator that is already running.
@SpringBootApplication
@SuppressWarnings("unused")
public class SpringGemFireServerApplication {
private static final boolean DEFAULT_AUTO_STARTUP = true;
public static void main(String[] args) {
SpringApplication.run(SpringGemFireServerApplication.class, args);
}
@Bean
static PropertyPlaceholderConfigurer propertyPlaceholderConfigurer() {
return new PropertyPlaceholderConfigurer();
}
private String applicationName() {
return SpringGemFireServerApplication.class.getSimpleName();
}
@Bean
Properties gemfireProperties(
@Value("${gemfire.log.level:config}") String logLevel,
@Value("${gemfire.locator.host-port:localhost[10334]}") String locatorHostPort,
@Value("${gemfire.manager.port:1099}") String managerPort) {
Properties gemfireProperties = new Properties();
gemfireProperties.setProperty("name", applicationName());
gemfireProperties.setProperty("log-level", logLevel);
//gemfireProperties.setProperty("start-locator", locatorHostPort);
//gemfireProperties.setProperty("jmx-manager", "true");
//gemfireProperties.setProperty("jmx-manager-port", managerPort);
//gemfireProperties.setProperty("jmx-manager-start", "true");
return gemfireProperties;
}
@Bean
CacheFactoryBean gemfireCache(@Qualifier("gemfireProperties") Properties gemfireProperties) {
CacheFactoryBean gemfireCache = new CacheFactoryBean();
gemfireCache.setClose(true);
gemfireCache.setProperties(gemfireProperties);
return gemfireCache;
}
@Bean
CacheServerFactoryBean gemfireCacheServer(Cache gemfireCache,
@Value("${gemfire.cache.server.bind-address:localhost}") String bindAddress,
@Value("${gemfire.cache.server.hostname-for-clients:localhost}") String hostNameForClients,
@Value("${gemfire.cache.server.port:40404}") int port) {
CacheServerFactoryBean gemfireCacheServer = new CacheServerFactoryBean();
gemfireCacheServer.setCache(gemfireCache);
gemfireCacheServer.setAutoStartup(DEFAULT_AUTO_STARTUP);
gemfireCacheServer.setBindAddress(bindAddress);
gemfireCacheServer.setHostNameForClients(hostNameForClients);
gemfireCacheServer.setPort(port);
return gemfireCacheServer;
}
@Bean
PartitionedRegionFactoryBean<Long, Long> factorialsRegion(Cache gemfireCache,
@Qualifier("factorialsRegionAttributes") RegionAttributes<Long, Long> factorialsRegionAttributes) {
PartitionedRegionFactoryBean<Long, Long> factorialsRegion = new PartitionedRegionFactoryBean<>();
factorialsRegion.setAttributes(factorialsRegionAttributes);
factorialsRegion.setCache(gemfireCache);
factorialsRegion.setClose(false);
factorialsRegion.setName("Factorials");
factorialsRegion.setPersistent(false);
return factorialsRegion;
}
@Bean
@SuppressWarnings("unchecked")
RegionAttributesFactoryBean factorialsRegionAttributes() {
RegionAttributesFactoryBean factorialsRegionAttributes = new RegionAttributesFactoryBean();
factorialsRegionAttributes.setCacheLoader(factorialsCacheLoader());
factorialsRegionAttributes.setKeyConstraint(Long.class);
factorialsRegionAttributes.setValueConstraint(Long.class);
return factorialsRegionAttributes;
}
FactorialsCacheLoader factorialsCacheLoader() {
return new FactorialsCacheLoader();
}
class FactorialsCacheLoader implements CacheLoader<Long, Long> {
// stupid, naive implementation of Factorial!
@Override
public Long load(LoaderHelper<Long, Long> loaderHelper) throws CacheLoaderException {
long number = loaderHelper.getKey();
assert number >= 0 : String.format("Number [%d] must be greater than or equal to 0", number);
if (number <= 2L) {
return (number < 2L ? 1L : 2L);
}
long result = number;
while (number-- > 1L) {
result *= number;
}
return result;
}
@Override
public void close() {
}
}
}
When I connect with gfsh and run list members,
I only see the locator.
I haven't verified the full configuration to check whether there's something else wrong, but the main issue I see right now is that you seem to be confusing the start-locator property (which automatically starts a locator in the current process when the member connects to the distributed system and stops the locator when the member disconnects) with the locators property (the list of locators used by system members, which must be configured consistently for every member of the distributed system). Since you're not setting the locators property when configuring the server, it simply can't join the existing distributed system because it doesn't know which locator to connect to.
The default locator port used by GemFire is 10334, so you should change your gemfireProperties method as follows:
@Bean
Properties gemfireProperties(@Value("${gemfire.log.level:config}") String logLevel, @Value("${gemfire.locator.host-port:localhost[10334]}") String locatorHostPort, @Value("${gemfire.manager.port:1099}") String managerPort) {
Properties gemfireProperties = new Properties();
gemfireProperties.setProperty("name", applicationName());
gemfireProperties.setProperty("log-level", logLevel);
// You can directly use the locatorHostPort variable instead.
gemfireProperties.setProperty("locators", "localhost[10334]");
return gemfireProperties;
}
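To verify (assuming the locator from your first step is still running on its default port), restart the Spring Boot process and check membership from gfsh again:
gfsh>connect --locator=localhost[10334]
gfsh>list members
Both the locator and the SpringGemFireServerApplication member should now be listed.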
Hope this helps.
Cheers.

JAXBElement: providing codec (/converter?) for class java.lang.Class

I have been evaluating to adopt spring-data-mongodb for a project. In summary, my aim is:
- Use existing XML schema files to generate Java classes. This is achieved using JAXB xjc. The root class is TSDProductDataType and is further modeled as below. The thing to note here is that ExtensionType contains protected List<Object> any;, allowing it to store objects of any class. In my case, it is amongst the classes named TSDModule_Name_HereModuleType and can be browsed here.
- Use spring-data-mongodb as the persistence store. This is achieved using a simple ProductDataRepository:
@RepositoryRestResource(collectionResourceRel = "product", path = "product")
public interface ProductDataRepository extends MongoRepository<TSDProductDataType, String> {
TSDProductDataType queryByGtin(@Param("gtin") String gtin);
}
The unmarshalled TSDProductDataType, however, contains JAXBElement, which spring-data-mongodb doesn't seem to handle by itself, and it throws a CodecConfigurationException: org.bson.codecs.configuration.CodecConfigurationException: Can't find a codec for class java.lang.Class.
Here is the faulty statement:
TSDProductDataType tsdProductDataType = jaxbElement.getValue();
repository.save(tsdProductDataType);
I tried playing around with Converters for spring-data-mongodb as explained here; however, it seems I am missing something, since the exception is about "Codecs" and not "Converters".
Any help is appreciated.
EDIT:
Adding converters for JAXBElement
Note: Works with version 1.5.6.RELEASE of org.springframework.boot::spring-boot-starter-parent. With version 2.0.0.M3, hell breaks loose
It seems that I missed something while trying to add converters earlier. So, I added them like below for testing:
@Component
@ReadingConverter
public class JAXBElementReadConverter implements Converter<DBObject, JAXBElement> {
//@Autowired
//MongoConverter converter;
@Override
public JAXBElement convert(DBObject dbObject) {
Class declaredType, scope;
QName name = qNameFromString((String)dbObject.get("name"));
Object rawValue = dbObject.get("value");
try {
declaredType = Class.forName((String)dbObject.get("declaredType"));
} catch (ClassNotFoundException e) {
if (rawValue.getClass().isArray()) declaredType = List.class;
else declaredType = LinkedHashMap.class;
}
try {
scope = Class.forName((String) dbObject.get("scope"));
} catch (ClassNotFoundException e) {
scope = JAXBElement.GlobalScope.class;
}
//Object value = rawValue instanceof DBObject ? converter.read(declaredType, (DBObject) rawValue) : rawValue;
Object value = "TODO";
return new JAXBElement(name, declaredType, scope, value);
}
QName qNameFromString(String s) {
String[] parts = s.split("[{}]");
if (parts.length > 2) return new QName(parts[1], parts[2], parts[0]);
if (parts.length == 1) return new QName(parts[0]);
return new QName("undef");
}
}
@Component
@WritingConverter
public class JAXBElementWriteConverter implements Converter<JAXBElement, DBObject> {
//@Autowired
//MongoConverter converter;
@Override
public DBObject convert(JAXBElement jaxbElement) {
DBObject dbObject = new BasicDBObject();
dbObject.put("name", qNameToString(jaxbElement.getName()));
dbObject.put("declaredType", jaxbElement.getDeclaredType().getName());
dbObject.put("scope", jaxbElement.getScope().getCanonicalName());
//dbObject.put("value", converter.convertToMongoType(jaxbElement.getValue()));
dbObject.put("value", "TODO");
dbObject.put("_class", JAXBElement.class.getName());
return dbObject;
}
public String qNameToString(QName name) {
if (XMLConstants.NULL_NS_URI.equals(name.getNamespaceURI())) return name.getLocalPart();
return name.getPrefix() + '{' + name.getNamespaceURI() + '}' + name.getLocalPart();
}
}
@SpringBootApplication
public class TsdApplication {
public static void main(String[] args) {
SpringApplication.run(TsdApplication.class, args);
}
@Bean
public CustomConversions customConversions() {
return new CustomConversions(Arrays.asList(
new JAXBElementReadConverter(),
new JAXBElementWriteConverter()
));
}
}
So far so good. However, how do I instantiate the MongoConverter converter?
MongoConverter is an interface so I guess I need an instantiable class adhering to this interface. Any suggestions?
I understand the desire for the convenience of mapping an existing domain object to the database layer with no boilerplate, but even if you weren't having the JAXB class structure issue, I would still recommend against using it verbatim. Unless this is a simple one-off project, you will almost certainly hit a point where your domain models need to change while your persisted data must remain in its existing state. If you just persist the data straight, you have no mechanism to convert between a newer domain schema and an older persisted data schema. Versioning the persisted data schema would be wise too.
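For example (a hypothetical sketch, not from your code), persisting an explicit schema version alongside the payload gives you a read-time hook for migrating older documents before they reach the domain layer:
public class ProductDataDocument {
// bumped whenever the persisted shape changes incompatibly
private int schemaVersion = 1;
private TSDProductDataType payload;
// on read: if schemaVersion is older than the current version, run a
// migration step before handing the payload to the domain layer
}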
The link you posted for writing the custom converters is one way to achieve this and fits in nicely with the Spring ecosystem. That method should also solve the issue you are experiencing (about the underlying messy JAXB data structure not converting cleanly).
Are you unable to get that method working? Ensure you are loading them into the Spring context with @Component plus auto class scanning, or manually via some Configuration class.
EDIT to address your EDIT:
Add the following to each of your converters:
private final MongoConverter converter;
public JAXBElement____Converter(MongoConverter converter) {
this.converter = converter;
}
Try changing your bean definition to:
@Bean
public CustomConversions customConversions(@Lazy MongoConverter converter) {
return new CustomConversions(Arrays.asList(
new JAXBElementReadConverter(converter),
new JAXBElementWriteConverter(converter)
));
}
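With the converter injected, the two TODO placeholders can then delegate to it, exactly as the commented-out lines in your converters suggest:
// in JAXBElementWriteConverter.convert(..)
dbObject.put("value", converter.convertToMongoType(jaxbElement.getValue()));
// in JAXBElementReadConverter.convert(..)
Object value = rawValue instanceof DBObject ? converter.read(declaredType, (DBObject) rawValue) : rawValue;
The @Lazy on the bean parameter breaks the circular dependency: the MongoConverter needs CustomConversions to be built, while your converters need the MongoConverter at runtime.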

Get a specific service implementation based on a parameter

In my Sling app I have data representing documents, with pages and content nodes. We mostly serve those documents as HTML, but now I would like to have a servlet to serve these documents as PDF and PPT.
Basically, I thought about implementing the factory pattern: in my servlet, depending on the extension of the request (pdf or ppt), I would get the proper DocumentBuilder implementation, either PdfDocumentBuilder or PptDocumentBuilder, from a DocumentBuilderFactory.
So first I had this:
public class PlanExportBuilderFactory {
public PlanExportBuilder getBuilder(String type) {
PlanExportBuilder builder = null;
switch (type) {
case "pdf":
builder = new PdfPlanExportBuilder();
break;
default:
logger.error("Unsupported plan export builder, type: " + type);
}
return builder;
}
}
In the servlet:
@Component(metatype = false)
@Service(Servlet.class)
@Properties({
@Property(name = "sling.servlet.resourceTypes", value = "myApp/document"),
@Property(name = "sling.servlet.extensions", value = { "ppt", "pdf" }),
@Property(name = "sling.servlet.methods", value = "GET")
})
public class PlanExportServlet extends SlingSafeMethodsServlet {
@Reference
PlanExportBuilderFactory builderFactory;
@Override
protected void doGet(SlingHttpServletRequest request, SlingHttpServletResponse response) throws ServletException, IOException {
Resource resource = request.getResource();
PlanExportBuilder builder = builderFactory.getBuilder(request.getRequestPathInfo().getExtension());
}
}
But the problem is that in the builder I would like to reference other services to access Sling resources, and with this solution, they're not bound.
I looked at service factories in OSGi but, from what I've understood, you use them to configure the same service implementation differently.
Then I found that you can get a specific implementation by naming it, or use a property and a filter.
So I've ended up with this:
public class PlanExportBuilderFactory {
@Reference(target = "(builderType=pdf)")
PlanExportBuilder pdfPlanExportBuilder;
public PlanExportBuilder getBuilder(String type) {
PlanExportBuilder builder = null;
switch (type) {
case "pdf":
return pdfPlanExportBuilder;
default:
logger.error("Unsupported plan export builder, type: " + type);
}
return builder;
}
}
The builder defines a "builderType" property:
// AbstractPlanExportBuilder implements PlanExportBuilder interface
@Component
@Service(value=PlanExportBuilder.class)
public class PdfPlanExportBuilder extends AbstractPlanExportBuilder {
@Property(name="builderType", value="pdf")
public PdfPlanExportBuilder() {
planDocument = new PdfPlanDocument();
}
}
I would like to know if it's a good way to retrieve my PDF builder implementation regarding OSGi good practices.
EDIT 1
Following Peter's answer, I've tried to add multiple references, but with Felix it doesn't seem to work:
@Reference(name = "planExportBuilder", cardinality = ReferenceCardinality.MANDATORY_MULTIPLE, policy = ReferencePolicy.DYNAMIC)
private Map<String, PlanExportBuilder> builders = new ConcurrentHashMap<String, PlanExportBuilder>();
protected final void bindPlanExportBuilder(PlanExportBuilder b, Map<String, Object> props) {
final String type = PropertiesUtil.toString(props.get("type"), null);
if (type != null) {
this.builders.put((String) props.get("type"), b);
}
}
protected final void unbindPlanExportBuilder(final PlanExportBuilder b, Map<String, Object> props) {
final String type = PropertiesUtil.toString(props.get("type"), null);
if (type != null) {
this.builders.remove(type);
}
}
I get these errors:
@Reference(builders) : Missing method bind for reference planExportBuilder
@Reference(builders) : Something went wrong: false - true - MANDATORY_MULTIPLE
@Reference(builders) : Missing method unbind for reference planExportBuilder
The Felix documentation here http://felix.apache.org/documentation/subprojects/apache-felix-maven-scr-plugin/scr-annotations.html#reference says for the bind method:
The default value is the name created by appending the reference name to the string bind. The method must be declared public or protected and take a single argument which is declared with the service interface type
So according to this, I understand it cannot work with Felix, as I'm trying to pass two arguments. However, I found an example here that seems to match what you've suggested but I cannot make it work: https://github.com/Adobe-Consulting-Services/acs-aem-samples/blob/master/bundle/src/main/java/com/adobe/acs/samples/services/impl/SampleMultiReferenceServiceImpl.java
EDIT 2
Just had to move the reference above the class to make it work:
@References({
@Reference(
name = "planExportBuilder",
referenceInterface = PlanExportBuilder.class,
policy = ReferencePolicy.DYNAMIC,
cardinality = ReferenceCardinality.OPTIONAL_MULTIPLE)
})
public class PlanExportServlet extends SlingSafeMethodsServlet {
Factories are evil :-) The main reason is of course the yucky class-loading hacks that are usually used, but also that they tend to have global knowledge. In general, you want to be able to add a bundle with a new DocumentBuilder and then have that type become available.
A more OSGi oriented solution is therefore to use service properties. This could look like:
@Component( property=HTTP_WHITEBOARD_FILTER_REGEX+"=/as")
public class DocumentServlet {
final Map<String,DocBuilder> builders = new ConcurrentHashMap<>();
public void doGet( HttpServletRequest rq, HttpServletResponse rsp )
throws IOException, ServletException {
InputStream in = getInputStream( rq.getPathInfo() );
if ( in == null )
....
String type = toType( rq.getPathInfo(), rq.getParameter("type") );
DocBuilder docbuilder = builders.get( type );
if ( docbuilder == null)
....
docbuilder.convert( type, in, rsp.getOutputStream() );
}
@Reference( cardinality=MULTIPLE, policy=DYNAMIC )
void addDocBuilder( DocBuilder db, Map<String,Object> props ) {
builders.put( (String) props.get("type"), db );
}
void removeDocBuilder( Map<String,Object> props ) {
builders.remove( props.get("type") );
}
}
A DocBuilder could look like:
@Component( property = "type=ppt-pdf" )
public class PowerPointToPdf implements DocBuilder {
...
}

Looking for a solution to extend Spring MVC with another Component/Annotation

Suppose I have a website that is used in normal mode (browser) and in some other mode, like a MobileView mode (inside a mobile app). For each controller I create, there might be a corresponding controller for MobileView, processing the same URL.
The easiest solution is to create ifs in all the controllers that have MobileView logic. Another solution would be to use a corresponding URL for MobileView (similar to the normal URL) and two separate controllers (possibly where one extends from the other, or some other way to recycle common code).
But a more elegant solution would be to have some extra annotations, like @SupportsMobileView (to mark a controller and tell the app that it will have a corresponding MobileView controller) and @MobileViewController (to mark a second controller and tell the app that this controller needs to run immediately after the initial controller marked with @SupportsMobileView). The link between a normal controller and a MobileView controller would be through the URL they process (defined with @RequestMapping).
Is it possible to extend Spring MVC (A)? Where would I inject new annotation scanners (B) and annotation handlers / component handlers (C)? How should the MobileView controller be executed (D)? (Right now I am thinking it could be executed through AOP, where the new handler of my new controller type programmatically creates a join point on the corresponding normal controller.)
Note that I did not mention how this MobileView mode is triggered and detected. Let's just say that there is a session boolean variable (flag) for that.
Critiques of any of the points (A), (B), (C) or (D) are welcome, as well as technical hints and alternative solutions to any point or to the whole solution.
A HandlerInterceptor can be used to intercept the RequestMapping handling. Here is a simple example of how to configure and implement one.
You can check for your session variable, and you will have a bunch of methods that allow you to do custom processing or just exchange the view from the normal controller handling with your mobile view.
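A minimal sketch of that idea (the "mobileView" session attribute and the "-mobile" view-name suffix are made up here for illustration):
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.springframework.web.servlet.ModelAndView;
import org.springframework.web.servlet.handler.HandlerInterceptorAdapter;
public class MobileViewInterceptor extends HandlerInterceptorAdapter {
@Override
public void postHandle(HttpServletRequest request, HttpServletResponse response, Object handler, ModelAndView modelAndView) throws Exception {
Boolean mobile = (Boolean) request.getSession().getAttribute("mobileView");
// swap the resolved view for its mobile variant after the normal controller ran
if (Boolean.TRUE.equals(mobile) && modelAndView != null && modelAndView.getViewName() != null) {
modelAndView.setViewName(modelAndView.getViewName() + "-mobile");
}
}
}
Register it via <mvc:interceptors> (or a WebMvcConfigurer) so it applies across controllers without touching them.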
Ok, warnings: this is only a proof of concept of what I understood must be done, so:
- @MobileViewEnable and @MobileView annotated (and related) methods need to stay in the same controller
- there's no check for the HTTP method used
- the two methods must have the same signature
- the @MobileView annotation value and the @RequestMapping annotation value must be equal and unique
- the logic inside callYourLogic(..) defines which method is going to be called; at the moment there's a very simple logic that checks whether the parameter ("mobile") exists in the request, just to test
- this code is not intended to be used as is (at all)
- don't know if it works at all outside my pc (joke :D, ehm..)
So:
Annotations:
@Retention(RetentionPolicy.RUNTIME)
public @interface MobileView {
String value() default "";
}
@Retention(RetentionPolicy.RUNTIME)
public @interface MobileViewEnable {
}
ExampleController:
@Controller
public class MainController extends BaseController {
private final static Logger logger = LoggerFactory.getLogger(MainController.class);
private final static String PROVA_ROUTE = "prova";
@MobileViewEnable
@RequestMapping(PROVA_ROUTE)
public String prova() {
logger.debug("inside prova!!!");
return "provaview";
}
@MobileView(PROVA_ROUTE)
public String prova2() {
logger.debug("inside prova2!!!");
return "prova2view";
}
}
Aspect definition:
<bean id="viewAspect" class="xxx.yyy.ViewAspect" />
<aop:config>
<aop:pointcut expression="@annotation(xxx.yyy.MobileViewEnable)" id="viewAspectPointcut" />
<aop:aspect ref="viewAspect" order="1">
<aop:around method="around" pointcut-ref="viewAspectPointcut" arg-names="viewAspectPointcut"/>
</aop:aspect>
</aop:config>
Aspect implementation:
public class ViewAspect implements BeforeAdvice, ApplicationContextAware {
private final static Logger logger = LoggerFactory.getLogger(ViewAspect.class);
private ApplicationContext applicationContext;
public Object around(ProceedingJoinPoint joinPoint) {
Method mobileViewAnnotatedMethod = null;
HttpServletRequest request = getCurrentHttpRequest();
String controllerName = getSimpleClassNameWithFirstLetterLowercase(joinPoint);
Object[] interceptedMethodArgs = getInterceptedMethodArgs(joinPoint);
String methodName = getCurrentMethodName(joinPoint);
Method[] methods = getAllControllerMethods(joinPoint);
Method interceptedMethod = getInterceptedMethod(methods, methodName);
String interceptedMethodRoute = getRouteFromInterceptedMethod(interceptedMethod);
if (callYourLogic(request)) {
mobileViewAnnotatedMethod = getMobileViewAnnotatedMethodWithRouteName(methods, interceptedMethodRoute);
if (mobileViewAnnotatedMethod != null)
return invokeMethod(mobileViewAnnotatedMethod, interceptedMethodArgs, controllerName);
}
return continueInterceptedMethodExecution(joinPoint, interceptedMethodArgs);
}
private Object continueInterceptedMethodExecution(ProceedingJoinPoint joinPoint, Object[] interceptedMethodArgs) {
try {
return joinPoint.proceed(interceptedMethodArgs);
} catch (Throwable e) {
logger.error("unable to proceed with intercepted method call: " + e);
}
return null;
}
private Object[] getInterceptedMethodArgs(JoinPoint joinPoint) {
return joinPoint.getArgs();
}
private boolean callYourLogic(HttpServletRequest request) {
// INSERT HERE YOUR CUSTOM LOGIC (e.g.: is the server accessed from a mobile device?)
// THIS IS A STUPID LOGIC USED ONLY FOR EXAMPLE
return request.getParameter("mobile")!= null;
}
private HttpServletRequest getCurrentHttpRequest() {
return ((ServletRequestAttributes) RequestContextHolder.currentRequestAttributes()).getRequest();
}
private String invokeMethod(Method method, Object[] methodArgs, String className) {
if (method != null) {
try {
Object classInstance = getInstanceOfClass(method, className);
return (String) method.invoke(classInstance, methodArgs);
} catch (Exception e) {
logger.error("unable to invoke method" + method + " - " + e);
}
}
return null;
}
private Object getInstanceOfClass(Method method, String className) {
return applicationContext.getBean(className);
}
private Method getMobileViewAnnotatedMethodWithRouteName(Method[] methods, String routeName) {
for (Method m : methods) {
MobileView mobileViewAnnotation = m.getAnnotation(MobileView.class);
if (mobileViewAnnotation != null && mobileViewAnnotation.value().equals(routeName))
return m;
}
return null;
}
private String getRouteFromInterceptedMethod(Method method) {
RequestMapping requestMappingAnnotation = method.getAnnotation(RequestMapping.class);
if (requestMappingAnnotation != null)
return requestMappingAnnotation.value()[0];
return null;
}
private String getCurrentMethodName(JoinPoint joinPoint) {
return joinPoint.getSignature().getName();
}
private Method[] getAllControllerMethods(JoinPoint joinPoint) {
return joinPoint.getThis().getClass().getSuperclass().getMethods();
}
private String getSimpleClassNameWithFirstLetterLowercase(JoinPoint joinPoint) {
String simpleClassName = joinPoint.getThis().getClass().getSuperclass().getSimpleName();
return setFirstLetterLowercase(simpleClassName);
}
private String setFirstLetterLowercase(String simpleClassName) {
String firstLetterOfTheString = simpleClassName.substring(0, 1).toLowerCase();
String restOfTheString = simpleClassName.substring(1);
return firstLetterOfTheString + restOfTheString;
}
private Method getInterceptedMethod(Method[] methods, String lookingForMethodName) {
for (Method m : methods)
if (m.getName().equals(lookingForMethodName))
return m;
return null;
}
@Override
public void setApplicationContext(ApplicationContext applicationContext) throws BeansException {
this.applicationContext = applicationContext;
}
}
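To illustrate the intended behavior with the example controller above (assuming the aspect is wired up as in the XML config): a plain GET of /prova runs prova() and renders provaview, while GET /prova?mobile=1 makes callYourLogic(..) return true, so the aspect invokes prova2() and prova2view is rendered instead.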
