Empty karma html report - jasmine

Here are the facts:
- I have a Node.js app
- I've written some Jasmine tests
- I'm using Karma to generate the code coverage report
Here is the karma.conf.js:
module.exports = function (config) {
  config.set({
    frameworks: ['jasmine', 'browserify'],
    files: [
      'utils/dates.js',
      'utils/maths.js',
      'spec/server_utils_spec.js'
    ],
    preprocessors: {
      'utils/*.js': ['coverage', 'browserify'],
      'spec/server_utils_spec.js': ['coverage', 'browserify']
    },
    reporters: ['progress', 'coverage'],
    coverageReporter: {
      type: 'html',
      dir: 'coverage/'
    },
    colors: true,
    logLevel: config.LOG_INFO,
    autoWatch: true,
    singleRun: false
  })
}
These are my packages (I know they are global):
→ npm -g list | grep -i "karma"
├─┬ karma@2.0.4
├─┬ karma-browserify@5.3.0
├─┬ karma-coverage@1.1.2
├─┬ karma-html-reporter@0.2.7
├── karma-jasmine@1.1.2
├─┬ karma-mocha@1.3.0
and despite the terminal log showing both tests passing:
/usr/bin/karma start server/karma.conf.js
23 07 2018 15:39:36.033:INFO [framework.browserify]: registering rebuild (autoWatch=true)
23 07 2018 15:39:36.817:INFO [framework.browserify]: 3073 bytes written (0.03 seconds)
23 07 2018 15:39:36.817:INFO [framework.browserify]: bundle built
23 07 2018 15:39:36.818:WARN [karma]: No captured browser, open http://localhost:9876/
23 07 2018 15:39:36.822:INFO [karma]: Karma v2.0.4 server started at http://0.0.0.0:9876/
23 07 2018 15:39:39.135:INFO [Chrome 67.0.3396 (Linux 0.0.0)]: Connected on socket H9J96y-fFn-7PfkHAAAA with id manual-9692
Chrome 67.0.3396 (Linux 0.0.0): Executed 0 of 2 SUCCESS (0 secs / 0 secs)
Chrome 67.0.3396 (Linux 0.0.0): Executed 1 of 2 SUCCESS (0 secs / 0.001 secs)
Chrome 67.0.3396 (Linux 0.0.0): Executed 2 of 2 SUCCESS (0 secs / 0.002 secs)
Chrome 67.0.3396 (Linux 0.0.0): Executed 2 of 2 SUCCESS (0.033 secs / 0.002 secs)
TOTAL: 2 SUCCESS
Yet when I open the generated HTML report, it is completely empty.
I've tried the solutions to all the other related questions and I've come up with nothing.
My questions:
Why is the report empty?
How can I get the report to actually show coverage?
Thank you in advance!
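No accepted answer is recorded here, but one frequent cause with this exact setup is the preprocessor chain: running the coverage preprocessor before browserify instruments the files on disk, yet browserify then builds its own bundle from the original sources, so the instrumented code never reaches the browser and the report comes out empty. The usual workaround is to instrument inside the browserify pipeline with the browserify-istanbul transform; a minimal sketch, assuming that package is installed (it is not among the packages listed above) and that the spec requires the utils modules:

// karma.conf.js -- sketch: instrument via browserify-istanbul instead of
// chaining the 'coverage' preprocessor before 'browserify'
module.exports = function (config) {
  config.set({
    frameworks: ['jasmine', 'browserify'],
    // only the spec is listed; browserify pulls in utils/*.js via require()
    files: ['spec/server_utils_spec.js'],
    preprocessors: {
      'spec/server_utils_spec.js': ['browserify']
    },
    browserify: {
      debug: true,
      // keep the specs themselves out of the coverage figures
      transform: [['browserify-istanbul', { ignore: ['**/spec/**'] }]]
    },
    reporters: ['progress', 'coverage'],
    coverageReporter: { type: 'html', dir: 'coverage/' },
    colors: true,
    logLevel: config.LOG_INFO,
    autoWatch: true,
    singleRun: false
  })
}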

Related

socket.io server fails in scale test with artillery

I am running a scale test against socket.io using artillery, following the steps in the blog - https://www.artillery.io/blog/load-testing-socketio-with-artillery - with the following modification (adding more users):
config:
  target: "http://localhost:3000"
  phases:
    - duration: 60
      arrivalRate: 5
    - duration: 60
      arrivalRate: 5
      rampTo: 50
I am executing the test against the code available at https://github.com/socketio/socket.io/tree/main/examples/chat
Here are the findings:
engine.socketio.emit: .......................................................... 1877
engine.socketio.emit_rate: ..................................................... 2/sec
errors.ECONNREFUSED: ........................................................... 111
errors.Error: xhr poll error: .................................................. 591
errors.Error: xhr post error: .................................................. 388
vusers.completed: .............................................................. 860
vusers.created: ................................................................ 1950
vusers.created_by_name.A chatty user: .......................................... 188
vusers.created_by_name.A mostly quiet user: .................................... 282
vusers.created_by_name.A user that just lurks: ................................. 1480
vusers.failed: ................................................................. 1090
vusers.session_length:
min: ......................................................................... 60007.8
max: ......................................................................... 174740.5
median: ...................................................................... 64236
p95: ......................................................................... 161191.7
p99: ......................................................................... 171159.6
Any idea, why it is failing for so many users?
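No answer was posted, but the errors.ECONNREFUSED and xhr poll/post errors usually mean the chat server stopped accepting new connections at that arrival rate (listen backlog, open-file limits, or a saturated single Node process), rather than a problem in the artillery script itself. One way to see where it saturates is to count live sockets on the server side; a minimal sketch, assuming socket.io v4 as used by the linked chat example:

// count-connections.js -- instrumented stand-in for the chat server
const { createServer } = require('http');
const { Server } = require('socket.io');

const httpServer = createServer();
const io = new Server(httpServer);

let live = 0;
io.on('connection', (socket) => {
  live += 1;
  socket.on('disconnect', () => { live -= 1; });
});

// log the live connection count every 5s while artillery runs
setInterval(() => console.log(`live sockets: ${live}`), 5000);

httpServer.listen(3000);

Watching this counter alongside the test shows the concurrency at which refusals begin; raising the OS open-file limit or spreading the load over several Node processes are the usual next steps.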

running gulp inside a Laravel project fails when mixing styles and/or compiling sass files

Everything was working and I was able to run gulp and gulp watch normally; after some changes in my phpspec files it doesn't work anymore. See the details below.
This is my gulpfile.js:
var elixir = require('laravel-elixir');

elixir(function (mix) {
  mix.phpUnit().phpSpec();

  // this task should look for files inside the resources/assets/sass
  // folder.
  mix.sass(
    [
      './resources/assets/bower/bootstrap-sass/assets/stylesheets/_bootstrap.scss',
      '**',
    ],
    'resources/assets/css/sass.css'
  );

  // this task should look for files inside the resources/assets/css
  // folder.
  mix.styles(
    [
      '**',
    ],
    'public/css/all.css'
  );

  // this task should look for files inside the resources/assets/js
  // folder.
  mix.scripts(
    [
      './resources/assets/bower/jquery/dist/jquery.js',
      './resources/assets/bower/bootstrap-sass/assets/javascripts/bootstrap.js',
      '**',
    ],
    'public/js/app.js'
  );

  mix.version([
    "css/all.css",
    "js/app.js",
  ]);

  mix.browserSync({
    proxy: "localhost:8000",
  });
});
And this is the output after running gulp:
[19:00:44] Using gulpfile ~/Public/forsale/gulpfile.js
[19:00:44] Starting 'default'...
[19:00:44] Starting 'PHPUnit'...
Triggering PHPUnit: vendor/bin/phpunit --verbose
[19:00:44] Finished 'default' after 44 ms
PHPUnit 4.8.26 by Sebastian Bergmann and contributors.
Runtime: PHP 7.0.4-7ubuntu2.1
Configuration: /home/rafael/Public/forsale/phpunit.xml
.
Time: 49 ms, Memory: 8.00MB
OK (1 test, 1 assertion)
[19:00:44] gulp-notify: [Green!] Your PHPUnit tests passed!
[19:00:44] Finished 'PHPUnit' after 144 ms
[19:00:44] Starting 'PHPSpec'...
Triggering PHPSpec: vendor/bin/phpspec run
/ skipped: 0% / pending: 0% / passed: 100% / failed: 0% / broken: 0% / 2 examples
1 specs
2 examples (2 passed)
5ms
[19:00:44] gulp-notify: [Green!] Your PHPSpec tests passed!
[19:00:44] Finished 'PHPSpec' after 84 ms
[19:00:44] Starting 'sass'...
Fetching Sass Source Files...
- ./resources/assets/bower/bootstrap-sass/assets/stylesheets/_bootstrap.scss
Saving To...
- resources/assets/css/sass.css
[19:00:44] 'sass' errored after 104 ms
[19:00:44] Error: Cannot find module './flex-Spec'
at Function.Module._resolveFilename (module.js:326:15)
at Function.Module._load (module.js:277:25)
at Module.require (module.js:354:17)
at require (internal/module.js:12:17)
at Object.<anonymous> (/home/rafael/Public/forsale/node_modules/autoprefixer-core/lib/hacks/flex.js:6:14)
at Object.<anonymous> (/home/rafael/Public/forsale/node_modules/autoprefixer-core/lib/hacks/flex.js:58:4)
at Module._compile (module.js:410:26)
at Object.Module._extensions..js (module.js:417:10)
at Module.load (module.js:344:32)
at Function.Module._load (module.js:301:12)
[19:00:44] Error in plugin 'run-sequence(sass)'
Message:
sass catch
It seems that run-sequence has a missing module, but I couldn't find it.
The problem was solved: I removed my node_modules folder and ran npm install && npm update again. It looks like one of my libraries was corrupted, and I don't know why.
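Note that the stack trace points the missing './flex-Spec' require at autoprefixer-core, not at run-sequence itself (run-sequence only reports the failing sass task), which is consistent with a corrupted install of that package. The fix described above boils down to running, from the project root:

rm -rf node_modules
npm install && npm update
gulp    # the sass task should now complete without the missing-module error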

Make Cygnus use WebHDFS to write to local HDFS

I'm trying to make a local Orion+Cygnus pair persist Orion's data on a local HDFS through WebHDFS.
Cygnus' instructions on GitHub mention very little about WebHDFS, as the configuration is mostly about HttpFS.
The OrionHDFSSink .md says that hdfs_port=50070 is for WebHDFS, which is indeed what my HDFS uses. So I would expect that setting the port this way would make Cygnus automatically use WebHDFS, but in my case it doesn't seem to work that way.
So, here's my agent_1.conf:
cygnusagent.sources = http-source
cygnusagent.sinks = hdfs-sink
cygnusagent.channels = hdfs-channel
# source configuration
cygnusagent.sources.http-source.channels = hdfs-channel
cygnusagent.sources.http-source.type = org.apache.flume.source.http.HTTPSource
cygnusagent.sources.http-source.port = 5050
cygnusagent.sources.http-source.handler = com.telefonica.iot.cygnus.handlers.OrionRestHandler
cygnusagent.sources.http-source.handler.notification_target = /notify
cygnusagent.sources.http-source.handler.default_service = def_serv
cygnusagent.sources.http-source.handler.default_service_path = def_servpath
cygnusagent.sources.http-source.handler.events_ttl = 4
cygnusagent.sources.http-source.interceptors = ts gi
cygnusagent.sources.http-source.interceptors.ts.type = timestamp
cygnusagent.sources.http-source.interceptors.gi.type = com.telefonica.iot.cygnus.interceptors.GroupingInterceptor$Builder
cygnusagent.sources.http-source.interceptors.gi.grouping_rules_conf_file = /usr/cygnus/conf/grouping_rules.conf
# OrionHDFSSink configuration
cygnusagent.sinks.hdfs-sink.channel = hdfs-channel
cygnusagent.sinks.hdfs-sink.type = com.telefonica.iot.cygnus.sinks.OrionHDFSSink
cygnusagent.sinks.hdfs-sink.hdfs_host = localHDFS.ip
cygnusagent.sinks.hdfs-sink.hdfs_port = 50070
cygnusagent.sinks.hdfs-sink.hdfs_username = HDFSrootUser
cygnusagent.sinks.hdfs-sink.attr_persistence = column
# hdfs-channel configuration
cygnusagent.channels.hdfs-channel.type = memory
cygnusagent.channels.hdfs-channel.capacity = 1000
cygnusagent.channels.hdfs-channel.transactionCapacity = 100
When I update an entity in Orion, to which Cygnus is subscribed, Cygnus logs the following:
02 Sep 2015 20:09:12,353 INFO [2055470757@qtp-1523539038-0] (com.telefonica.iot.cygnus.handlers.OrionRestHandler.getEvents:150) - Starting transaction (1441217314-956-0000000000)
02 Sep 2015 20:09:12,362 INFO [2055470757@qtp-1523539038-0] (com.telefonica.iot.cygnus.handlers.OrionRestHandler.getEvents:236) - Received data ({ "subscriptionId" : "55e735c9b89e8535f8ca5ef2", "originator" : "localhost", "contextResponses" : [ { "contextElement" : { "type" : "Reading", "isPattern" : "false", "id" : "Reading1.1", "attributes" : [ { "name" : "Cost", "type" : "double", "value" : "32" }, { "name" : "Reading_ID", "type" : "integer", "value" : "14" }, { "name" : "Threshold", "type" : "double", "value" : "30" }, { "name" : "email", "type" : "string", "value" : "arthurmvieira@hotmail.com" } ] }, "statusCode" : { "code" : "200", "reasonPhrase" : "OK" } } ]})
02 Sep 2015 20:09:12,366 INFO [2055470757@qtp-1523539038-0] (com.telefonica.iot.cygnus.handlers.OrionRestHandler.getEvents:258) - Event put in the channel (id=2020008711, ttl=4)
02 Sep 2015 20:09:12,432 INFO [SinkRunner-PollingRunner-DefaultSinkProcessor] (com.telefonica.iot.cygnus.sinks.OrionSink.process:128) - Event got from the channel (id=2020008711, headers={fiware-servicepath=def_servpath, destination=reading1.1_reading, content-type=application/json, fiware-service=def_serv, ttl=4, transactionId=1441217314-956-0000000000, timestamp=1441217352368}, bodyLength=812)
02 Sep 2015 20:09:12,549 INFO [SinkRunner-PollingRunner-DefaultSinkProcessor] (com.telefonica.iot.cygnus.sinks.OrionHDFSSink.persist:356) - [hdfs-sink] Persisting data at OrionHDFSSink. HDFS file (def_serv/def_servpath/reading1.1_reading/reading1.1_reading.txt), Data ({"recvTime":"2015-09-02T18:09:12.368Z","Cost":"32", "Cost_md":[],"Reading_ID":"14", "Reading_ID_md":[],"Threshold":"30", "Threshold_md":[],"email":"arthurmvieira@hotmail.com", "email_md":[]})
02 Sep 2015 20:09:12,557 ERROR [SinkRunner-PollingRunner-DefaultSinkProcessor] (com.telefonica.iot.cygnus.sinks.OrionSink.process:143) - Persistence error (The /user/root/def_serv/def_servpath/reading1.1_reading directory could not be created in HDFS. HttpFS response: 503 Service unavailable)
02 Sep 2015 20:09:12,558 INFO [SinkRunner-PollingRunner-DefaultSinkProcessor] (com.telefonica.iot.cygnus.sinks.OrionSink.process:173) - An event was put again in the channel (id=2020008711, ttl=3)
02 Sep 2015 20:09:12,558 INFO [SinkRunner-PollingRunner-DefaultSinkProcessor] (com.telefonica.iot.cygnus.sinks.OrionSink.process:193) - Finishing transaction (1441217314-956-0000000000)
02 Sep 2015 20:09:13,560 INFO [SinkRunner-PollingRunner-DefaultSinkProcessor] (com.telefonica.iot.cygnus.sinks.OrionSink.process:128) - Event got from the channel (id=2020008711, headers={fiware-servicepath=def_servpath, destination=reading1.1_reading, content-type=application/json, fiware-service=def_serv, ttl=3, transactionId=1441217314-956-0000000000, timestamp=1441217352368}, bodyLength=812)
02 Sep 2015 20:09:13,574 INFO [SinkRunner-PollingRunner-DefaultSinkProcessor] (com.telefonica.iot.cygnus.sinks.OrionHDFSSink.persist:356) - [hdfs-sink] Persisting data at OrionHDFSSink. HDFS file (def_serv/def_servpath/reading1.1_reading/reading1.1_reading.txt), Data ({"recvTime":"2015-09-02T18:09:12.368Z","Cost":"32", "Cost_md":[],"Reading_ID":"14", "Reading_ID_md":[],"Threshold":"30", "Threshold_md":[],"email":"arthurmvieira@hotmail.com", "email_md":[]})
02 Sep 2015 20:09:13,574 ERROR [SinkRunner-PollingRunner-DefaultSinkProcessor] (com.telefonica.iot.cygnus.sinks.OrionSink.process:143) - Persistence error (The /user/root/def_serv/def_servpath/reading1.1_reading directory could not be created in HDFS. HttpFS response: 503 Service unavailable)
02 Sep 2015 20:09:13,575 INFO [SinkRunner-PollingRunner-DefaultSinkProcessor] (com.telefonica.iot.cygnus.sinks.OrionSink.process:173) - An event was put again in the channel (id=2020008711, ttl=2)
02 Sep 2015 20:09:13,575 INFO [SinkRunner-PollingRunner-DefaultSinkProcessor] (com.telefonica.iot.cygnus.sinks.OrionSink.process:193) - Finishing transaction (1441217314-956-0000000000)
02 Sep 2015 20:09:15,576 INFO [SinkRunner-PollingRunner-DefaultSinkProcessor] (com.telefonica.iot.cygnus.sinks.OrionSink.process:128) - Event got from the channel (id=2020008711, headers={fiware-servicepath=def_servpath, destination=reading1.1_reading, content-type=application/json, fiware-service=def_serv, ttl=2, transactionId=1441217314-956-0000000000, timestamp=1441217352368}, bodyLength=812)
02 Sep 2015 20:09:15,590 INFO [SinkRunner-PollingRunner-DefaultSinkProcessor] (com.telefonica.iot.cygnus.sinks.OrionHDFSSink.persist:356) - [hdfs-sink] Persisting data at OrionHDFSSink. HDFS file (def_serv/def_servpath/reading1.1_reading/reading1.1_reading.txt), Data ({"recvTime":"2015-09-02T18:09:12.368Z","Cost":"32", "Cost_md":[],"Reading_ID":"14", "Reading_ID_md":[],"Threshold":"30", "Threshold_md":[],"email":"arthurmvieira@hotmail.com", "email_md":[]})
02 Sep 2015 20:09:15,599 ERROR [SinkRunner-PollingRunner-DefaultSinkProcessor] (com.telefonica.iot.cygnus.sinks.OrionSink.process:143) - Persistence error (The /user/root/def_serv/def_servpath/reading1.1_reading directory could not be created in HDFS. HttpFS response: 503 Service unavailable)
02 Sep 2015 20:09:15,600 INFO [SinkRunner-PollingRunner-DefaultSinkProcessor] (com.telefonica.iot.cygnus.sinks.OrionSink.process:173) - An event was put again in the channel (id=2020008711, ttl=1)
02 Sep 2015 20:09:15,600 INFO [SinkRunner-PollingRunner-DefaultSinkProcessor] (com.telefonica.iot.cygnus.sinks.OrionSink.process:193) - Finishing transaction (1441217314-956-0000000000)
02 Sep 2015 20:09:18,601 INFO [SinkRunner-PollingRunner-DefaultSinkProcessor] (com.telefonica.iot.cygnus.sinks.OrionSink.process:128) - Event got from the channel (id=2020008711, headers={fiware-servicepath=def_servpath, destination=reading1.1_reading, content-type=application/json, fiware-service=def_serv, ttl=1, transactionId=1441217314-956-0000000000, timestamp=1441217352368}, bodyLength=812)
02 Sep 2015 20:09:18,615 INFO [SinkRunner-PollingRunner-DefaultSinkProcessor] (com.telefonica.iot.cygnus.sinks.OrionHDFSSink.persist:356) - [hdfs-sink] Persisting data at OrionHDFSSink. HDFS file (def_serv/def_servpath/reading1.1_reading/reading1.1_reading.txt), Data ({"recvTime":"2015-09-02T18:09:12.368Z","Cost":"32", "Cost_md":[],"Reading_ID":"14", "Reading_ID_md":[],"Threshold":"30", "Threshold_md":[],"email":"arthurmvieira@hotmail.com", "email_md":[]})
02 Sep 2015 20:09:18,618 ERROR [SinkRunner-PollingRunner-DefaultSinkProcessor] (com.telefonica.iot.cygnus.sinks.OrionSink.process:143) - Persistence error (The /user/root/def_serv/def_servpath/reading1.1_reading directory could not be created in HDFS. HttpFS response: 503 Service unavailable)
02 Sep 2015 20:09:18,621 INFO [SinkRunner-PollingRunner-DefaultSinkProcessor] (com.telefonica.iot.cygnus.sinks.OrionSink.process:173) - An event was put again in the channel (id=2020008711, ttl=0)
02 Sep 2015 20:09:18,621 INFO [SinkRunner-PollingRunner-DefaultSinkProcessor] (com.telefonica.iot.cygnus.sinks.OrionSink.process:193) - Finishing transaction (1441217314-956-0000000000)
02 Sep 2015 20:09:22,622 INFO [SinkRunner-PollingRunner-DefaultSinkProcessor] (com.telefonica.iot.cygnus.sinks.OrionSink.process:128) - Event got from the channel (id=2020008711, headers={fiware-servicepath=def_servpath, destination=reading1.1_reading, content-type=application/json, fiware-service=def_serv, ttl=0, transactionId=1441217314-956-0000000000, timestamp=1441217352368}, bodyLength=812)
02 Sep 2015 20:09:22,635 INFO [SinkRunner-PollingRunner-DefaultSinkProcessor] (com.telefonica.iot.cygnus.sinks.OrionHDFSSink.persist:356) - [hdfs-sink] Persisting data at OrionHDFSSink. HDFS file (def_serv/def_servpath/reading1.1_reading/reading1.1_reading.txt), Data ({"recvTime":"2015-09-02T18:09:12.368Z","Cost":"32", "Cost_md":[],"Reading_ID":"14", "Reading_ID_md":[],"Threshold":"30", "Threshold_md":[],"email":"arthurmvieira@hotmail.com", "email_md":[]})
02 Sep 2015 20:09:22,635 ERROR [SinkRunner-PollingRunner-DefaultSinkProcessor] (com.telefonica.iot.cygnus.sinks.OrionSink.process:143) - Persistence error (The /user/root/def_serv/def_servpath/reading1.1_reading directory could not be created in HDFS. HttpFS response: 503 Service unavailable)
02 Sep 2015 20:09:22,635 WARN [SinkRunner-PollingRunner-DefaultSinkProcessor] (com.telefonica.iot.cygnus.sinks.OrionSink.process:163) - The event TTL has expired, it is no more re-injected in the channel (id=2020008711, ttl=0)
02 Sep 2015 20:09:22,635 INFO [SinkRunner-PollingRunner-DefaultSinkProcessor] (com.telefonica.iot.cygnus.sinks.OrionSink.process:193) - Finishing transaction (1441217314-956-0000000000)
So you can see it's trying to use HttpFS, as it logs the response:
HttpFS response: 503 Service unavailable
...on each write attempt.
How should I configure the agent to use WebHDFS?
Thank you
I don't know what was happening, but the configuration above is correct and is working now.
After several tries at rebooting the instance, rewriting the config files and chasing log errors other than the one mentioned, it worked.
At some point Cygnus was trying to write to localhost:50075 instead of {localHDFS.ip}:50070, but that went away after rebooting Cygnus.
All instances are at their latest version (important).
Cygnus configuration for WebHDFS is just about setting the port to 50070, nothing else is required.
Regarding the connections you mention to 50075, they are correct as well, since that's how WebHDFS behaves: when you want to upload data to HDFS, the client (in this case, Cygnus) first accesses the Namenode through the TCP/50070 port. The Namenode then responds with a redirection pointing to the Datanode where the data will actually be uploaded; that redirection uses the TCP/50075 port, so datanode:50075 must be reachable by the client (Cygnus). This is why we use HttpFS in the global instance of Cosmos at FIWARE Lab: HttpFS works as a gateway that hides the details of the Datanodes, and a single entry point and port (14000) is required.
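To make that two-step write concrete, here is a minimal sketch of what a WebHDFS client does under the hood (Node.js 18+ global fetch; the host, user, and path here are placeholders, not values taken from the answer):

// webhdfs-create.js -- sketch of the Namenode -> Datanode two-step described above
async function webhdfsCreate(namenode, path, data) {
  // Step 1: ask the Namenode (TCP/50070) where to write. It answers with a
  // 307 redirect whose Location header points at a Datanode on TCP/50075.
  const res1 = await fetch(
    `http://${namenode}:50070/webhdfs/v1${path}?op=CREATE&user.name=HDFSrootUser`,
    { method: 'PUT', redirect: 'manual' }
  );
  const datanodeUrl = res1.headers.get('location');

  // Step 2: upload the data to the Datanode URL from step 1, which is why
  // datanode:50075 must be reachable from the client (Cygnus, in this case).
  const res2 = await fetch(datanodeUrl, { method: 'PUT', body: data });
  return res2.status; // 201 Created on success
}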

pacemaker+corosync+DRBD 'unknown error' (1)

I have configured DRBD, Corosync, and Pacemaker on my system. It works well when I have only one resource group, but I face the problem below after adding a second resource group.
crm_mon -f    # shows the error below
Migration summary:
* Node cent64asf1:
* Node cent64asf2:
r1_fs: migration-threshold=1000000 fail-count=1000000 last-failure='Sat Aug 23 18:00:45 2014'
Failed actions:
r1_fs_start_0 on cent64asf2 'unknown error' (1): call=98, status=complete, last-rc-change='Sat Aug 23 18:00:45 2014', queued=107ms, exec=0ms
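No answer was posted here, but two hedged starting points given this output: a fail-count of 1000000 is Pacemaker's INFINITY, which bans r1_fs from cent64asf2 until the failure is cleared, and a filesystem resource most often fails to start because the underlying DRBD device is not Primary on that node. Standard crmsh/DRBD commands (not taken from the question) to check and recover:

cat /proc/drbd              # on cent64asf2: is the backing DRBD resource Primary there?
crm resource cleanup r1_fs  # after fixing the cause, clear the INFINITY fail-count
crm_mon -f                  # the migration summary should no longer list r1_fs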

Where's my time going mysteriously?

I have a Ruby script that uses the rsolr rubygem to generate XMLs and POST them to Apache Solr (javadoc Update Command) served by Jetty. My script logs timings using the following code:
unless docs.empty?
  begin
    log.info("Adding to solr")
    response = solr.add(docs)
    log.info("#{(id_2*100.0)/last_id}% Done")
    if response['responseHeader']['status'] != 0
      log.fatal("Document ids not sent")
      #log.fatal(Solr::Request::AddDocument.new(docs_single).to_s)
      log.close
      exit
    end
    log.info("#{Time.now.to_f - starttime}s to feed Solr. #{id_1} to #{id_2}")
  rescue Exception => e
    log.fatal("Document ids not sent => ")
    #log.fatal(Solr::Request::AddDocument.new(docs_single).to_s)
    #log.fatal(docs)
    log.close
    exit
  end
end
The generated log looks like this:
I, [2011-10-09T15:03:42.617048 #30092] INFO -- : Executing - SELECT * FROM solr_feeddata_2 WHERE id >= 5879999 AND id < 5881999
I, [2011-10-09T15:03:44.086661 #30092] INFO -- : External Data1 fetch time: 1.45462989807129
I, [2011-10-09T15:03:44.109514 #30092] INFO -- : External Data2 fetch time: 0.0226790904998779
I, [2011-10-09T15:03:44.109611 #30092] INFO -- : 1.49255704879761s to fetch details from database. 5879999 to 5881999
I, [2011-10-09T15:03:44.109702 #30092] INFO -- : Adding data1, data2, building docs
I, [2011-10-09T15:03:45.912603 #30092] INFO -- : 3.29554414749146s to build documents. 5879999 to 5881999
I, [2011-10-09T15:03:45.912730 #30092] INFO -- : Adding to solr
I, [2011-10-09T15:04:24.797620 #30092] INFO -- : 61.180855194502% Done
I, [2011-10-09T15:04:24.797744 #30092] INFO -- : 42.180694103241s to feed Solr. 5879999 to 5881999
According to this log, Solr took (42.18 - 3.29 - 1.49 - 2) = 35.4s to respond.
At the same time my Solr log for this particular update goes like
INFO: {add=[5879999, 5880000, 5880001, 5880002, 5880003, 5880004, 5880005, 5880007, ... (1468 adds)]} 0 5780
Oct 9, 2011 3:04:24 PM org.apache.solr.core.SolrCore execute
INFO: [core0] webapp=/solr path=/update params={wt=ruby} status=0 QTime=5780
Oct 9, 2011 3:04:42 PM org.apache.solr.update.processor.LogUpdateProcessor finish
This clearly shows that Solr took 5.78s to add the docs, initiate the response, and close the log updater.
Both the services run on same machine, inside the network, and their ping summary is
rtt min/avg/max/mdev = 0.008/0.010/0.022/0.006 ms
This pattern is clearly visible for every batch of data processed. Despite my sincere efforts to get to the bottom of this mystery, I have not been able to find the reason for this behavior.
My Solr mergeFactor is 10, autoCommit is off.
