We are receiving intermittent long-running queries (LRQs) in Ignite. The same query runs perfectly fine for most of the day, but it occasionally becomes an LRQ, which creates a huge spike in response times and affects the platform.
We are using Ignite version 2.7.5 with partitioned caches (256 partitions), and we have two nodes that discover each other via UDP multicast.
RAM is 210 GB, persistence is enabled, and the rest of the configuration is as follows:
<beans xmlns="http://www.springframework.org/schema/beans"
       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
       xsi:schemaLocation="http://www.springframework.org/schema/beans
                           http://www.springframework.org/schema/beans/spring-beans.xsd">
    <!-- Enable annotation-driven caching. -->
    <bean name="noOpFailureHandler" class="org.apache.ignite.failure.NoOpFailureHandler"/>
    <bean id="ignite.cfg" class="org.apache.ignite.configuration.IgniteConfiguration">
        <property name="peerClassLoadingEnabled" value="true"/>
        <property name="igniteInstanceName" value="PincodeGrid"/>
        <property name="clientMode" value="false"/>
        <property name="failureDetectionTimeout" value="80000"/>
        <property name="clientFailureDetectionTimeout" value="120000"/>
        <property name="systemWorkerBlockedTimeout" value="30000"/>
        <property name="longQueryWarningTimeout" value="3000"/>
        <property name="failureHandler" ref="noOpFailureHandler"/>
        <property name="metricsLogFrequency" value="#{600 * 10 * 1000}"/>
        <property name="rebalanceThreadPoolSize" value="16"/>
        <property name="dataStorageConfiguration">
            <bean class="org.apache.ignite.configuration.DataStorageConfiguration">
                <!-- Redefining the default region's settings -->
                <property name="pageSize" value="#{4 * 1024}"/>
                <!--<property name="writeThrottlingEnabled" value="true"/>-->
                <property name="defaultDataRegionConfiguration">
                    <bean class="org.apache.ignite.configuration.DataRegionConfiguration">
                        <property name="persistenceEnabled" value="true"/>
                        <property name="initialSize" value="#{105L * 1024 * 1024 * 1024}"/>
                        <property name="name" value="Default_Region"/>
                        <!-- Setting the max size of the default region to 120 GB. -->
<property name="maxSize" value="#{120L * 1024 * 1024 * 1024}"/>
<property name="checkpointPageBufferSize"
value="#{4096L * 1024 * 1024}"/>
<!--<property name="pageEvictionMode" value="RANDOM_2_LRU"/>-->
</bean>
</property>
<property name="walPath" value="/wal/pincode"/>
<property name="walArchivePath" value="/wal/pincode/archive"/>
<property name="storagePath" value="/ignite/persistence"/>
<property name="checkpointFrequency" value="180000"/>
<property name="checkpointThreads" value="8"/>
<property name="walMode" value="BACKGROUND"/>
<property name="walSegmentSize" value="#{1L * 1024 * 1024 * 1024}"/>
<!--<property name="authenticationEnabled" value="true"/>-->
</bean>
</property>
<property name="discoverySpi">
<bean class="org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi">
<property name="ipFinder">
<bean class="org.apache.ignite.spi.discovery.tcp.ipfinder.multicast.TcpDiscoveryMulticastIpFinder">
<property name="multicastGroup" value="224.0.0.180"/>
<property name="multicastPort" value="47514"/>
</bean>
</property>
</bean>
</property>
<property name="communicationSpi">
<bean class="org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi">
<property name="messageQueueLimit" value="2048"/>
<property name="socketWriteTimeout" value="10000"/>
<property name="connectionsPerNode" value="10"/>
<property name="usePairedConnections" value="true"/>
<property name="socketReceiveBuffer" value="#{64L * 1024}"/>
</bean>
</property>
</bean>
</beans>
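Each node is started from this Spring XML file, along the lines of the following sketch (the file name is a placeholder):
import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;

// start a server node from the Spring XML configuration above
Ignite ignite = Ignition.start("pincode-grid.xml");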
And here is the query that becomes an LRQ:
SELECT
SPS__Z1.PRIORITY __C0_0,
LOGISTIC__Z2.CODFLAG __C0_1,
LOGISTIC__Z2.CODLIMIT __C0_2,
LOGISTIC__Z2.PREPAIDLIMIT __C0_3,
LOGISTIC__Z2.SLAVEID __C0_4,
STOCKROOM__Z0.ORDERCUTOFFTIMESDD __C0_5,
STOCKROOM__Z0.ORDERCUTOFFTIMEED __C0_6,
STOCKROOM__Z0.ORDERCUTOFFTIMEHD __C0_7,
LOGISTIC__Z2.LOGISTICSID __C0_8,
LOGISTIC__Z2.LOGISTICPRIORITY __C0_9
FROM "SellerPincodeServiceabilityCache".SELLERPINCODESERVICEABILITY SPS__Z1
INNER JOIN "LogisticsServiceabilityCache".LOGISTICSSERVICEABILITY LOGISTIC__Z2
ON TRUE
INNER JOIN "StockRoomLocationCache".STOCKROOMLOCATIONS STOCKROOM__Z0
ON TRUE
WHERE (LOGISTIC__Z2.PREPAIDLIMIT >= ?3)
AND ((LOGISTIC__Z2.ISLOGISTICACTIVE = 'Y')
AND ((LOGISTIC__Z2.ISFRAGILE = ?8)
AND ((LOGISTIC__Z2.ISPRECIOUS = ?7)
AND ((LOGISTIC__Z2.DELIVERYMODE = ?6)
AND ((LOGISTIC__Z2.TRANSPORTMODE = ?5)
AND ((LOGISTIC__Z2.DESTNPINCODE = ?1)
AND ((STOCKROOM__Z0.ACTIVE = 'Y')
AND ((STOCKROOM__Z0.SELLERID = ?2)
AND ((STOCKROOM__Z0.SLAVEID = LOGISTIC__Z2.SLAVEID)
AND ((SPS__Z1.SLAVEID = LOGISTIC__Z2.SLAVEID)
AND ((SPS__Z1.SHIPMENTTYPE = ?4)
AND ((SPS__Z1.DELIVERYTYPE = ?6)
AND ((SPS__Z1.PINCODE = ?1)
AND (SPS__Z1.SELLERID = ?2))))))))))))))
ORDER BY 1, 10
Here is the execution plan:
SELECT
SPS__Z1.PRIORITY AS __C0_0,
LOGISTIC__Z2.CODFLAG AS __C0_1,
LOGISTIC__Z2.CODLIMIT AS __C0_2,
LOGISTIC__Z2.PREPAIDLIMIT AS __C0_3,
LOGISTIC__Z2.SLAVEID AS __C0_4,
STOCKROOM__Z0.ORDERCUTOFFTIMESDD AS __C0_5,
STOCKROOM__Z0.ORDERCUTOFFTIMEED AS __C0_6,
STOCKROOM__Z0.ORDERCUTOFFTIMEHD AS __C0_7,
LOGISTIC__Z2.LOGISTICSID AS __C0_8,
LOGISTIC__Z2.LOGISTICPRIORITY AS __C0_9
FROM "SellerPincodeServiceabilityCache".SELLERPINCODESERVICEABILITY SPS__Z1
/* "SellerPincodeServiceabilityCache".SELLER_PINCODE_SERVICE_INDX: SHIPMENTTYPE = ?4
AND DELIVERYTYPE = ?6
AND PINCODE = ?1
AND SELLERID = ?2
*/
/* WHERE (SPS__Z1.SELLERID = ?2)
AND ((SPS__Z1.PINCODE = ?1)
AND ((SPS__Z1.SHIPMENTTYPE = ?4)
AND (SPS__Z1.DELIVERYTYPE = ?6)))
*/
INNER JOIN "LogisticsServiceabilityCache".LOGISTICSSERVICEABILITY LOGISTIC__Z2
/* "LogisticsServiceabilityCache".LOGISTICS_SERVICEABILITY_INDX: PREPAIDLIMIT >= ?3
AND ISLOGISTICACTIVE = 'Y'
AND ISFRAGILE = ?8
AND ISPRECIOUS = ?7
AND DELIVERYMODE = ?6
AND TRANSPORTMODE = ?5
AND DESTNPINCODE = ?1
AND SLAVEID = SPS__Z1.SLAVEID
*/
ON 1=1
/* WHERE (SPS__Z1.SLAVEID = LOGISTIC__Z2.SLAVEID)
AND ((LOGISTIC__Z2.DESTNPINCODE = ?1)
AND ((LOGISTIC__Z2.TRANSPORTMODE = ?5)
AND ((LOGISTIC__Z2.DELIVERYMODE = ?6)
AND ((LOGISTIC__Z2.ISPRECIOUS = ?7)
AND ((LOGISTIC__Z2.ISFRAGILE = ?8)
AND ((LOGISTIC__Z2.PREPAIDLIMIT >= ?3)
AND (LOGISTIC__Z2.ISLOGISTICACTIVE = 'Y')))))))
*/
INNER JOIN "StockRoomLocationCache".STOCKROOMLOCATIONS STOCKROOM__Z0
/* "StockRoomLocationCache".STOCKROOMLOCATIONS_SLAVEID_IDX: SLAVEID = LOGISTIC__Z2.SLAVEID */
ON 1=1
WHERE (LOGISTIC__Z2.PREPAIDLIMIT >= ?3)
AND ((LOGISTIC__Z2.ISLOGISTICACTIVE = 'Y')
AND ((LOGISTIC__Z2.ISFRAGILE = ?8)
AND ((LOGISTIC__Z2.ISPRECIOUS = ?7)
AND ((LOGISTIC__Z2.DELIVERYMODE = ?6)
AND ((LOGISTIC__Z2.TRANSPORTMODE = ?5)
AND ((LOGISTIC__Z2.DESTNPINCODE = ?1)
AND ((STOCKROOM__Z0.ACTIVE = 'Y')
AND ((STOCKROOM__Z0.SELLERID = ?2)
AND ((STOCKROOM__Z0.SLAVEID = LOGISTIC__Z2.SLAVEID)
AND ((SPS__Z1.SLAVEID = LOGISTIC__Z2.SLAVEID)
AND ((SPS__Z1.SHIPMENTTYPE = ?4)
AND ((SPS__Z1.DELIVERYTYPE = ?6)
AND ((SPS__Z1.PINCODE = ?1)
AND (SPS__Z1.SELLERID = ?2))))))))))))))
ORDER BY 1, 10
The bound parameters were: [533233, 125112, 2480.0, TSHIP, SUR, HD, N, N]
Maybe this query is genuinely slow? Did you check how many rows it typically returns and how long it takes? Maybe you have some outlier sellerIds for which it runs much longer?
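One way to check this is to time the query (or a count over its most selective table) directly for a few sellerId/pincode combinations, e.g. via the thin JDBC driver. A minimal sketch; the host is a placeholder, and the sample values are taken from the logged parameters above:
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;

public class LrqProbe {
    public static void main(String[] args) throws Exception {
        // the thin JDBC driver ships with ignite-core; point it at either node
        try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1");
             PreparedStatement ps = conn.prepareStatement(
                     "SELECT COUNT(*) FROM \"SellerPincodeServiceabilityCache\".SELLERPINCODESERVICEABILITY " +
                     "WHERE SELLERID = ? AND PINCODE = ?")) {
            ps.setObject(1, 125112); // sellerId from the logged parameters (assuming numeric columns)
            ps.setObject(2, 533233); // pincode from the logged parameters
            long start = System.nanoTime();
            try (ResultSet rs = ps.executeQuery()) {
                rs.next();
                System.out.printf("rows=%d, took=%d ms%n",
                        rs.getLong(1), (System.nanoTime() - start) / 1_000_000);
            }
        }
    }
}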
I have a map as below and want to retrieve the values of the inner map using its key.
<bean id="testMap" class="java.util.HashMap">
<constructor-arg>
<map>
<entry key="A" value="AA" />
<entry key="B">
<map>
<entry key="B1" value="B11" />
<entry key="B2" value="B22" />
<entry key="B3" value="B33" />
</map>
</entry>
</map>
</constructor-arg>
</bean>
The code below prints the first-level entry; how do I print the inner map's values?
myMap.entrySet().stream().filter(map -> map.getKey().equals("B")).forEach(p -> System.out.println(p));
The answer is less straightforward than the one most people with a nested map are looking for because you have a Map<String,Object>.
Recreating the map you describe:
Map<String,Object> map = new HashMap<>();
Map<String,String> bMap = new HashMap<>();
bMap.put("B1", "B11");
bMap.put("B2", "B22");
bMap.put("B3", "B33");
map.put("A", "AA");
map.put("B", bMap);
Then getting the values of the inner map:
map.entrySet().stream()
.filter(entry -> entry.getKey().equals("B"))
.flatMap(entry -> ((Map<String,String>) entry.getValue()).values().stream())
.forEach(System.out::println);
This prints (in no particular order, since HashMap does not guarantee iteration order):
B22
B33
B11
A shorter and faster solution was provided by Holger in the comments:
((Map<String,String>)map.get("B")).values().stream()
.forEach(System.out::println);
It does a single lookup of the map entry instead of the linear search in the solution above, which is why it is faster.
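One caveat for both variants: since the outer map is a Map<String,Object>, the cast is unchecked and will fail at runtime if the value under "B" is not a map. A slightly more defensive sketch:
Object value = map.get("B");
if (value instanceof Map) {
    // the element types are still unchecked, but this avoids a ClassCastException
    ((Map<?, ?>) value).values().forEach(System.out::println);
}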
Can you try this?
myMap.entrySet().stream()
    .filter(entry -> entry.getKey().equals("B"))
    .map(entry -> (Map<String, String>) entry.getValue())
    .forEach(inner -> inner.values().forEach(System.out::println));
I am using Spring Integration's "int-jdbc:inbound-channel-adapter" to fetch records from the DB. However, after fetching the records I also need to update two columns:
1) the status column
2) the timestamp column
Updating the status column is not an issue, as I can use the XML snippet below:
<int-jdbc:inbound-channel-adapter query="select * from item where status=2"
                                  channel="target" data-source="dataSource"
                                  update="update item set status=10 where id in (:id)" />
However, when I try to update the timestamp, it doesn't work:
<int-jdbc:inbound-channel-adapter query="select * from item where status=2"
                                  channel="target" data-source="dataSource"
                                  update="update item set status=10, timestamp=:timestamp where id in (:id)"
                                  update-sql-parameter-source-factory="timestampUpdaterSqlParameterSourceFactory" />
<bean id="timestampUpdaterSqlParameterSourceFactory"
class="org.springframework.integration.jdbc.ExpressionEvaluatingSqlParameterSourceFactory" >
<property name="parameterExpressions">
<map>
<entry key="timestamp" value="#now"/>
</map>
</property>
</bean>
<bean id="now" scope="prototype" class="java.sql.Timestamp">
<constructor-arg value="#{ T(java.lang.System).currentTimeMillis()}" />
</bean>
We could use DB-level functions to set the time, such as SYSDATE on Oracle, but I am not keen on using DB-specific functions in the code, for testing purposes (H2 is used for testing).
Any help is greatly appreciated.
Thanks
I had the same issue; the problem is that the :timestamp expression is evaluated as a collection projection (check the code here).
My original query was something like this:
update table set status = 1, published_at = :now where id_event in (:id)
After parsing, it looked something like this:
update table set status = 1, published_at = ?, ?, ? where id_event in (?, ?, ?)
The number of ? placeholders is the same as the number of rows returned by the select statement, so if the result has more than one row, you get a BadSqlGrammarException.
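For background, a SpEL collection projection (the ![] operator) evaluates an expression once per element of a collection, which is exactly why the single :timestamp placeholder expands into one ? per selected row. A standalone illustration of the operator, unrelated to the Spring Integration internals:
import java.util.List;

import org.springframework.expression.ExpressionParser;
import org.springframework.expression.spel.standard.SpelExpressionParser;

public class ProjectionDemo {
    public static void main(String[] args) {
        ExpressionParser parser = new SpelExpressionParser();
        // ![...] maps the expression over every element of the inline list
        List<?> lengths = (List<?>) parser
                .parseExpression("{'a','bb','ccc'}.![length()]")
                .getValue();
        System.out.println(lengths); // prints [1, 2, 3]
    }
}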
I made a not-very-nice (intrusive) workaround using spring-integration-java-dsl:
// requires java.lang.reflect.Field, org.springframework.expression.Expression
// and org.springframework.expression.ExpressionParser
@SuppressWarnings("unchecked")
protected void addNotCollectionProjectionExpression(
        ExpressionEvaluatingSqlParameterSourceFactory factory,
        String key, String expression) {
    try {
        // the factory keeps a Map<String, Expression[]> of [direct, projection] pairs
        Field parameterExpressionsField = factory.getClass().getDeclaredField("parameterExpressions");
        parameterExpressionsField.setAccessible(true);
        Map<String, Expression[]> parameterExpressions =
                (Map<String, Expression[]>) parameterExpressionsField.get(factory);
        Field parserField = factory.getClass().getDeclaredField("PARSER");
        parserField.setAccessible(true);
        ExpressionParser parser = (ExpressionParser) parserField.get(factory);
        Expression compiledExpression = parser.parseExpression(expression);
        // use the same expression for both slots so the collection
        // projection variant is never applied
        Expression[] expressions = new Expression[] {
                compiledExpression,
                compiledExpression
        };
        parameterExpressions.put(key, expressions);
    } catch (NoSuchFieldException | IllegalAccessException e) {
        logger.error("Field parameterExpressions | PARSER cannot be obtained", e);
    }
}
....
// how to use it
ExpressionEvaluatingSqlParameterSourceFactory factory =
        new ExpressionEvaluatingSqlParameterSourceFactory();
addNotCollectionProjectionExpression(factory, "now",
        "T(com.example.MyClass).staticMethod()");
return factory;
Notice that I avoid the collection projection by using the same expression in both elements of the array.
Mapping file:
<?xml version="1.0" encoding="utf-8" ?>
<hibernate-mapping xmlns="urn:nhibernate-mapping-2.2" assembly="OracleHibernateTest" namespace="OracleHibernateTest">
<class name="TableRow" table="TIMESTAMP_TEST_2">
<id name="Id" />
<property name="Time" type="Timestamp">
<column name="TIME" sql-type="TIMESTAMP(4)"></column>
</property>
</class>
</hibernate-mapping>
Code:
var configuration = new Configuration();
configuration.Configure();
var export = new SchemaExport(configuration);
export.Create((s) => Trace.WriteLine(s), true);
var sessionFactory = configuration.BuildSessionFactory();
using (var session = sessionFactory.OpenSession())
{
    var testItem = new TableRow() { Id = (new Random().Next()), Time = new DateTime(2014, 8, 25, 5, 12, 4, 587) };
    session.Save(testItem);
    session.Flush();
}
The code above inserts a new row, but the milliseconds in the Time column are truncated.
From investigation I found that NHibernate creates the OracleParameter for the SQL command with OracleType set to DateTime instead of Timestamp.
In Oracle, DateTime has no milliseconds.
After more investigation I updated the Oracle client drivers to the latest available version (ODAC 12c Release 2 12.1.0.1.2 Xcopy), and that solved my problem. It seems to have been an issue with the older Oracle drivers.
I'm using Spring 3.2 with Hibernate 4. In my DAO implementation I want to cache the results of a native SQL query. The method to get the results of this query looks like this:
public List<Object[]> getBestSellers(String category)
{
    Session session = sessionFactory.getCurrentSession();
    Query query = session.createSQLQuery(
            "SELECT i_id, i_title, a_fname, a_lname, SUM(ol_qty) AS val " +
            "FROM orders, order_line, item, author " +
            "WHERE order_line.ol_o_id = orders.o_id AND item.i_id = order_line.ol_i_id " +
            "AND item.i_subject = :category AND item.i_a_id = author.a_id " +
            "GROUP BY i_id " +
            "ORDER BY orders.o_date, val DESC" );
    query.setParameter( "category", category );
    query.setMaxResults( 50 );
    query.setCacheable( true );
    List<Object[]> res = query.list();
    return res;
}
It seems that the caching doesn't work, and I don't know why.
I have configured Hibernate in applicationContext.xml like this:
<props>
    <prop key="hibernate.jdbc.batch_size">50</prop>
    <prop key="hibernate.show_sql">false</prop>
    <prop key="hibernate.dialect">${jdbc.hibernate.dialect}</prop>
    <prop key="hibernate.max_fetch_depth">4</prop>
    <prop key="hibernate.cache.use_second_level_cache">true</prop>
    <prop key="hibernate.cache.use_query_cache">true</prop>
    <prop key="hibernate.cache.region.factory_class">org.hibernate.cache.ehcache.EhCacheRegionFactory</prop>
    <prop key="hibernate.cache.provider_configuration_file_resource_path">classpath:ehcache.xml</prop>
    <prop key="hibernate.generate_statistics">true</prop>
</props>
and my ehcache.xml:
<?xml version="1.0" encoding="UTF-8"?>
<ehcache xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:noNamespaceSchemaLocation="http://ehcache.org/ehcache.xsd">
<diskStore path="java.io.tmpdir/Cloudscale-cache"/>
<defaultCache
eternal="false"
maxElementsInMemory="1000"
overflowToDisk="false"
diskPersistent="false"
timeToIdleSeconds="0"
timeToLiveSeconds="600"
memoryStoreEvictionPolicy="LRU"
/>
<cache
name="org.hibernate.cache.spi.UpdateTimestampsCache"
maxElementsInMemory="50"
eternal="false"
timeToIdleSeconds="0"
timeToLiveSeconds="86400"
overflowToDisk="true"/>
<cache
name="org.hibernate.cache.internal.StandardQueryCache"
maxElementsInMemory="50"
eternal="false"
timeToIdleSeconds="0"
timeToLiveSeconds="86400"
overflowToDisk="true"/>
</ehcache>
If you want to cache native SQL queries, then you have to use addScalar().
By using addScalar(), Hibernate will convert the result of the SQL query into objects for the individual named columns, rather than into entities.
Modify your query as below:
Query query = session.createSQLQuery(
        "SELECT i_id as a, i_title as b, a_fname as c, a_lname as d, SUM(ol_qty) AS val " +
        "FROM orders, order_line, item, author " +
        "WHERE order_line.ol_o_id = orders.o_id AND item.i_id = order_line.ol_i_id " +
        "AND item.i_subject = :category AND item.i_a_id = author.a_id " +
        "GROUP BY i_id " +
        "ORDER BY orders.o_date, val DESC" )
        .addScalar("a")
        .addScalar("b")
        .addScalar("c")
        .addScalar("d")
        .addScalar("val");
I have a question about writing an XPath expression for filtering resources by a property of type InputStream called data.
How can I do a text search? For example, this works:
String xpath1 = "<my app path>//element(*, nt:resource) [jcr:contains(@jcr:mimeType,'*plain*')]";
String xpath2 = "<my app path>//element(*, nt:resource) [jcr:contains(@jcr:encoding,'*utf*')]";
But this is not working.
String xpath3 = "<my app path>//element(*, nt:resource) [jcr:contains(@jcr:data,'*plain*')]";
The relevant fact is that we use some custom node types; let me explain the property definitions.
In Java terms...
public class Resource extends BaseNode {

    /** Encoding media type. It cannot be null or empty. */
    @Field(jcrName = "jcr:encoding", jcrDefaultValue = "")
    private String encoding;

    /** Resource's MIME type. It cannot be null or empty. */
    @Field(jcrName = "jcr:mimeType", jcrDefaultValue = "")
    private String mimeType;

    /** Resource's size (bytes). */
    @Field(jcrName = "skl:size")
    private long size;

    /** Resource's content data as stream. It cannot be null. */
    @Field(jcrName = "jcr:data")
    private InputStream data;
    ...
}

@Node(jcrType = "baseNode", isAbstract = true)
public abstract class BaseNode {

    @Field(jcrName = "name", id = true)
    protected String name;

    @Field(jcrName = "creationDate")
    protected Date creationDate;
    ...
}
And in Jackrabbit terms...
<!-- Base node type definition -->
<nodeType name="docs:baseNode"
          isMixin="false"
          hasOrderableChildNodes="false">
    <supertypes>
        <supertype>nt:hierarchyNode</supertype>
    </supertypes>
    <propertyDefinition name="docs:name"
                        requiredType="String"
                        autoCreated="false"
                        mandatory="true"
                        onParentVersion="COPY"
                        protected="false"
                        multiple="false" />
    <propertyDefinition name="docs:searchPath"
                        requiredType="String"
                        autoCreated="false"
                        mandatory="false"
                        onParentVersion="COPY"
                        protected="false"
                        multiple="false" />
    <propertyDefinition name="docs:creationDate"
                        requiredType="Date"
                        autoCreated="false"
                        mandatory="true"
                        onParentVersion="COPY"
                        protected="false"
                        multiple="false" />
    <propertyDefinition name="docs:lastModified"
                        requiredType="Date"
                        autoCreated="false"
                        mandatory="true"
                        onParentVersion="COPY"
                        protected="false"
                        multiple="false" />
    <childNodeDefinition name="*"
                         defaultPrimaryType="docs:baseNode"
                         autoCreated="false"
                         mandatory="false"
                         onParentVersion="COPY"
                         protected="false"
                         sameNameSiblings="false">
        <requiredPrimaryTypes>
            <requiredPrimaryType>docs:baseNode</requiredPrimaryType>
        </requiredPrimaryTypes>
    </childNodeDefinition>
</nodeType>

<!-- Resource node type definition -->
<nodeType name="skl:resource"
          isMixin="false"
          hasOrderableChildNodes="false">
    <supertypes>
        <supertype>docs:baseNode</supertype>
        <supertype>nt:resource</supertype>
    </supertypes>
    <propertyDefinition name="skl:size"
                        requiredType="Long"
                        autoCreated="false"
                        mandatory="true"
                        onParentVersion="COPY"
                        protected="false"
                        multiple="false" />
    <propertyDefinition name="skl:externalUri"
                        requiredType="String"
                        autoCreated="false"
                        mandatory="false"
                        onParentVersion="COPY"
                        protected="false"
                        multiple="false" />
</nodeType>
The point is: how do I write this query in order to filter by the jcr:data property?
I think you have to turn on text extraction so that the searchable text from your "jcr:data" property will be indexed. See this email thread on the Jackrabbit discussion list.
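Also worth noting: full-text conditions are usually scoped to the node rather than to the binary property itself. A sketch in the style of the examples above, assuming a text extractor for the stored MIME type is configured:
// searches all indexed text of the node, including text extracted from jcr:data
String xpath4 = "<my app path>//element(*, nt:resource) [jcr:contains(., 'plain')]";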
BTW, the JCR CND format is a much more compact way of describing your node types.
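For illustration, here is a hedged sketch of what the docs:baseNode definition above might look like in CND (property names and types are taken from the XML; onParentVersion=COPY is the CND default and is omitted):
[docs:baseNode] > nt:hierarchyNode
  - docs:name (string) mandatory
  - docs:searchPath (string)
  - docs:creationDate (date) mandatory
  - docs:lastModified (date) mandatory
  + * (docs:baseNode) = docs:baseNode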