I am trying to use exec with gulp to run a bash script that updates assets in the project. The path to the script is as follows:
./assets/update_assets.sh
I have followed the documentation and my gulpfile looks as follows:
'use strict';
var gulp = require('gulp'),
    gutil = require('gulp-util'),
    exec = require('child_process').exec;

gulp.task('update-assets', function (cb) {
  exec('./assets/update_assets.sh', function (err, stdout, stderr) {
    console.log(stdout);
    console.log(stderr);
    cb(err); // report the script's exit status back to gulp so the task fails visibly
  });
});
// 'update-assets' already runs as a dependency; no need to gulp.start() it again
gulp.task('default', ['update-assets']);
When I try to run gulp, I get errors:
[15:13:47] Error: Command failed: /bin/sh -c ./assets/update_assets.sh
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 168 100 168 0 0 197 0 --:--:-- --:--:-- --:--:-- 197
tar: Unrecognized archive format
tar: Error exit delayed from previous errors.
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 168 100 168 0 0 288 0 --:--:-- --:--:-- --:--:-- 288
tar: could not chdir to 'README.md/'
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 168 100 168 0 0 265 0 --:--:-- --:--:-- --:--:-- 264
tar: Unrecognized archive format
tar: Error exit delayed from previous errors.
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 168 100 168 0 0 276 0 --:--:-- --:--:-- --:--:-- 276
tar: could not chdir to 'gulpfile.js/'
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 168 100 168 0 0 283 0 --:--:-- --:--:-- --:--:-- 282
tar: Unrecognized archive format
tar: Error exit delayed from previous errors.
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 168 100 168 0 0 268 0 --:--:-- --:--:-- --:--:-- 268
tar: could not chdir to 'package.json/'
at ChildProcess.exithandler (child_process.js:744:12)
at ChildProcess.emit (events.js:110:17)
at maybeClose (child_process.js:1008:16)
at Socket.<anonymous> (child_process.js:1176:11)
at Socket.emit (events.js:107:17)
at Pipe.close (net.js:476:12)
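Worth noting: exec runs the command through /bin/sh from the gulp process's working directory, so the failing script can be reproduced and traced outside gulp entirely. A minimal sketch, assuming nothing beyond the script path shown above:

# Run the script directly with shell tracing to see which curl/tar
# calls produce the tiny 168-byte downloads that tar cannot unpack.
bash -x ./assets/update_assets.sh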
Related
While running this command:
curl -O https://raw.githubusercontent.com/Homebrew/install/master/install.sh
I am getting this:
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
0 0 0 0 0 0 0 0 --:--:-- 0:01:15 --:--:-- 0
curl: (35) getpeername() failed with errno 22: Invalid argument
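A curl error 35 with getpeername() errno 22 usually points at something breaking the socket underneath curl (a proxy, VPN, antivirus, or firewall) rather than at the URL itself; a hedged first diagnostic is to rerun the same transfer with verbose tracing:

# Verbose output shows how far the connection and TLS handshake get.
curl -vO https://raw.githubusercontent.com/Homebrew/install/master/install.sh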
I am getting an ElasticsearchStatusWarning saying that the cluster state is yellow. Upon running the cluster health API, I see the below:
curl -X GET http://localhost:9200/_cluster/health/
{"cluster_name":"my-elasticsearch","status":"yellow","timed_out":false,"number_of_nodes":8,"number_of_data_nodes":3,"active_primary_shards":220,"active_shards":438,"relocating_shards":0,"initializing_shards":2,"unassigned_shards":0,"delayed_unassigned_shards":0,"number_of_pending_tasks":0,"number_of_in_flight_fetch":0,"task_max_waiting_in_queue_millis":0,"active_shards_percent_as_number":99.54545454545455}
initializing_shards is 2, so I further run the below call:
curl -X GET "http://localhost:9200/_cat/shards?h=index,shard,prirep,state,unassigned.reason" | grep INIT
graph_vertex_24_18549 0 r INITIALIZING ALLOCATION_FAILED
curl -X GET http://localhost:9200/_cat/shards/graph_vertex_24_18549
graph_vertex_24_18549 0 p STARTED 8373375 8.4gb IP1 elasticsearch-data-1
graph_vertex_24_18549 0 r INITIALIZING IP2 elasticsearch-data-2
Rerunning the same command a few minutes later shows it is now being initialized on elasticsearch-data-0. See below:
graph_vertex_24_18549 0 p STARTED 8373375 8.4gb IP1 elasticsearch-data-1
graph_vertex_24_18549 0 r INITIALIZING IP0 elasticsearch-data-0
If I rerun it again a few minutes later, I can see it being initialized on elasticsearch-data-2 again, but it never reaches STARTED.
curl -X GET http://localhost:9200/_cat/allocation?v
shards disk.indices disk.used disk.avail disk.total disk.percent host ip node
147 162.2gb 183.8gb 308.1gb 492gb 37 IP1 IP1 elasticsearch-data-2
146 217.3gb 234.2gb 257.7gb 492gb 47 IP2 IP2 elasticsearch-data-1
147 216.6gb 231.2gb 260.7gb 492gb 47 IP3 IP3 elasticsearch-data-0
curl -X GET http://localhost:9200/_cat/nodes?v
ip heap.percent ram.percent cpu load_1m load_5m load_15m node.role master name
IP1 7 77 20 4.17 4.57 4.88 mi - elasticsearch-master-2
IP2 72 59 7 2.59 2.38 2.19 i - elasticsearch-5f4bd5b88f-4lvxz
IP3 57 49 3 0.75 1.13 1.09 di - elasticsearch-data-2
IP4 63 57 21 2.69 3.58 4.11 di - elasticsearch-data-0
IP5 5 59 7 2.59 2.38 2.19 mi - elasticsearch-master-0
IP6 69 53 13 4.67 4.60 4.66 di - elasticsearch-data-1
IP7 8 70 14 2.86 3.20 3.09 mi * elasticsearch-master-1
IP8 30 77 20 4.17 4.57 4.88 i - elasticsearch-5f4bd5b88f-wnrl4
curl -s -XGET http://localhost:9200/_cluster/allocation/explain -H 'Content-Type: application/json' -d '{"index": "graph_vertex_24_18549", "shard": 0, "primary": false}'
{"index":"graph_vertex_24_18549","shard":0,"primary":false,"current_state":"initializing","unassigned_info":{"reason":"ALLOCATION_FAILED","at":"2020-11-04T08:21:45.756Z","failed_allocation_attempts":1,"details":"failed shard on node [1XEXS92jTK-wwanNgQrxsA]: failed to perform indices:data/write/bulk[s] on replica [graph_vertex_24_18549][0], node[1XEXS92jTK-wwanNgQrxsA], [R], s[STARTED], a[id=RnTOlfQuQkOumVuw_NeuTw], failure RemoteTransportException[[elasticsearch-data-2][IP:9300][indices:data/write/bulk[s][r]]]; nested: CircuitBreakingException[[parent] Data too large, data for [<transport_request>] would be [4322682690/4gb], which is larger than the limit of [4005632409/3.7gb], real usage: [3646987112/3.3gb], new bytes reserved: [675695578/644.3mb]]; ","last_allocation_status":"no_attempt"},"current_node":{"id":"o_9jyrmOSca9T12J4bY0Nw","name":"elasticsearch-data-0","transport_address":"IP:9300"},"explanation":"the shard is in the process of initializing on node [elasticsearch-data-0], wait until initialization has completed"}
The thing is, I was earlier being alerted about unassigned shards due to the same exception as above: "CircuitBreakingException[[parent] Data too large, data for [<transport_request>] would be [4322682690/4gb], which is larger than the limit of [4005632409/3.7gb]".
But back then the heap was only 2G. I increased it to 4G, and now I am seeing the same error, but this time for initializing shards instead of unassigned shards.
How can I remediate this?
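Not a definitive remediation, but with this symptom the usual next steps are two stock Elasticsearch calls: inspect the parent circuit breaker on each node to see how close real usage sits to the limit, and, once the memory pressure is addressed, ask the cluster to retry the failed allocations. Whether the right fix is a larger heap, a higher indices.breaker.total.limit, or smaller bulk requests depends on the workload; the sketch below only shows how to look:

# Per-node circuit-breaker stats: compare the parent breaker's
# "estimated_size" against its "limit_size" on the data nodes.
curl -s 'http://localhost:9200/_nodes/stats/breaker?pretty'

# Retry shards whose allocation previously failed with ALLOCATION_FAILED.
curl -s -X POST 'http://localhost:9200/_cluster/reroute?retry_failed=true'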
[!] /usr/bin/curl -f -L -o /var/folders/1x/jmv798095x1fbjc_6128mflh0000gn/T/d20170314-59599-7fjizg/file.zip https://github.com/realm/SwiftLint/releases/download/0.16.1/portable_swiftlint.zip --create-dirs --netrc
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100   599    0   599    0     0    166      0 --:--:--  0:00:03 --:--:--   166
  0     0    0     0    0     0      0      0 --:--:--  0:01:20 --:--:--     0
curl: (7) Failed to connect to github-cloud.s3.amazonaws.com port 443: Operation timed out
This is the error when I execute pod install. My network is behind a SOCKS5 proxy. I can download https://github.com/realm/SwiftLint/releases/download/0.16.1/portable_swiftlint.zip easily on its own, but how can I get pod install to succeed?
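The [!] line shows that CocoaPods is shelling out to /usr/bin/curl, and curl honors the ALL_PROXY environment variable, so one hedged workaround is to point that at the SOCKS5 proxy before running pod install. The address below is a placeholder for your actual proxy:

# Assumption: the SOCKS5 proxy listens on 127.0.0.1:1080; adjust to match.
export ALL_PROXY=socks5://127.0.0.1:1080
pod install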
My shell script uploads files to a server. I'd like stdout and stderr to be written to both a file and the console, BUT I don't want the progress bar/percentage (which is stderr) to go to the file; I only want curl errors written to the file.
Initially I had this
curl ... 2>> "$log"
This wrote nice neat 1 or more lines of the download to the log file, but nothing to console.
I then changed it to
curl ... 3>&1 1>&2 2>&3 | tee -a "$log"
This wrote to both console and file, yay! Except it wrote every progress update for every percentage to the file, making the log file very large and tedious to read.
How can I view the progress in the console, but only write the last part of the output to file?
I want this
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
106  1166    0     0  106  1166      0   2514 --:--:-- --:--:-- --:--:--     2
106  1166    0     0  106  1166      0    795  0:00:01  0:00:01 --:--:--     0
106  1166    0     0  106  1166      0    660  0:00:01  0:00:01 --:--:--     0
This is what I get with the second curl redirect
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
0 9.8G 0 0 0 16384 0 24764 4d 22h --:--:-- 4d 22h 24764
0 9.8G 0 0 0 3616k 0 4098k 0:41:54 --:--:-- 0:41:54 15.9M
0 9.8G 0 0 0 24.2M 0 12.9M 0:12:59 0:00:01 0:12:58 19.8M
0 9.8G 0 0 0 50.4M 0 17.5M 0:09:34 0:00:02 0:09:32 22.7M
0 9.8G 0 0 0 79.8M 0 20.5M 0:08:09 0:00:03 0:08:06 24.7M
1 9.8G 0 0 1 101M 0 20.7M 0:08:04 0:00:04 0:08:00 24.0M
1 9.8G 0 0 1 129M 0 21.9M 0:07:37 0:00:05 0:07:32 25.1M
1 9.8G 0 0 1 150M 0 21.8M 0:07:41 0:00:06 0:07:35 25.1M
1 9.8G 0 0 1 169M 0 21.5M 0:07:47 0:00:07 0:07:40 23.8M
1 9.8G 0 0 1 195M 0 21.9M 0:07:38 0:00:08 0:07:30 23.0M
2 9.8G 0 0 2 219M 0 22.1M 0:07:33 0:00:09 0:07:24 23.5M
2 9.8G 0 0 2 243M 0 22.4M 0:07:29 0:00:10 0:07:19 22.9M
2 9.8G 0 0 2 273M 0 22.9M 0:07:17 0:00:11 0:07:06 24.6M
..
.. hundreds of lines...
..
99 9.8G 0 0 99 9982M 0 24.8M 0:06:45 0:06:41 0:00:04 24.5M
99 9.8G 0 0 99 9.7G 0 24.8M 0:06:44 0:06:42 0:00:02 24.9M
99 9.8G 0 0 99 9.8G 0 24.8M 0:06:44 0:06:43 0:00:01 26.0M
100 9.8G 0 0 100 9.8G 0 24.8M 0:06:44 0:06:44 --:--:-- 25.8M
Edit:
What I don't understand is that, according to http://www.tldp.org/LDP/abs/html/io-redirection.html,
2>filename
# Redirect stderr to file "filename."
but if I use that, I don't get every single stderr progress line in the file, whereas if I try any of the other solutions, every stderr progress line is redirected to the file.
Just as a video is a series of frames, an updating percentage in the console is a series of lines, and what is in the file is the true output. The difference is that in the console a carriage return makes the next update overwrite the current line, whereas in a text file each update ends up below the previous one.
If you want to see the updating percentage in the console, but not in the file, you could use something like this:
curl |& tee >(sed '1b;$!d' > log)
Or:
curl |& tee /dev/tty | sed '1b;$!d' > log
Here |& is bash shorthand for 2>&1 |, and sed '1b;$!d' keeps only the first line (the column headers) and the last line (the final transfer summary), deleting everything in between.
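For example, a concrete bash invocation of the first form, where the URL, big.iso, and curl.log are all placeholders:

# Watch the live progress meter in the terminal while logging only the
# header line and the final summary line of curl's stderr.
curl -o big.iso https://example.com/big.iso |& tee >(sed '1b;$!d' > curl.log)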
Let's say you have 10 huge .txt files, test1.txt ... test10.txt.
Here's how you would upload them with a single cURL command and log the results without logging the progress meter. The trick is to use the --write-out (-w) option; from the man page, these are all the relevant fields for an upload to FTP/SFTP:
curl -T "test[1-10:1].txt" -u user:password sftp://example.com:22/home/user/ -# -k -w "%{url_effective}\t%{ftp_entry_path}\t%{http_code}\t%{http_connect}\t%{local_ip}\t%{local_port}\t%{num_connects}\t%{num_redirects}\t%{redirect_url}\t%{remote_ip}\t%{remote_port}\t%{size_download}\t%{size_header}\t%{size_request}\t%{size_upload}\t%{speed_download}\t%{speed_upload}\t%{ssl_verify_result}\t%{time_appconnect}\t%{time_connect}\t%{time_namelookup}\t%{time_pretransfer}\t%{time_redirect}\t%{time_starttransfer}\t%{time_total}\n" >> log.txt
For your log.txt file, you may want to prepend the column headers first:
echo -e "url_effective\tftp_entry_path\thttp_code\thttp_connect\tlocal_ip\tlocal_port\tnum_connects\tnum_redirects\tredirect_url\tremote_ip\tremote_port\tsize_download\tsize_header\tsize_request\tsize_upload\tspeed_download\tspeed_upload\tssl_verify_result\ttime_appconnect\ttime_connect\ttime_namelookup\ttime_pretransfer\ttime_redirect\ttime_starttransfer\ttime_total" > log.txt
The -# flag makes the progress bar a bit neater, like:
######################################################################## 100.0%
######################################################################## 100.0%
...and the curl -T "test[1-10:1].txt" piece lets you specify a range of files to upload.
Problems:
More and more data nodes go into bad health in Cloudera Manager.
Clue 1:
No task or job is running; this is just an idle data node. Here is the top output:
-bash-4.1$ top
top - 18:27:22 up 4:59, 3 users, load average: 4.55, 3.52, 3.18
Tasks: 139 total, 1 running, 137 sleeping, 1 stopped, 0 zombie
Cpu(s): 14.8%us, 85.2%sy, 0.0%ni, 0.0%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st
Mem: 7932720k total, 1243372k used, 6689348k free, 52244k buffers
Swap: 6160376k total, 0k used, 6160376k free, 267228k cached
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
13766 root 20 0 2664m 21m 7048 S 85.4 0.3 190:34.75 java
17688 root 20 0 2664m 19m 7048 S 75.5 0.3 1:05.97 java
12765 root 20 0 2859m 21m 7140 S 36.9 0.3 133:25.46 java
2909 mapred 20 0 1894m 113m 14m S 1.0 1.5 2:55.26 java
1850 root 20 0 1469m 62m 4436 S 0.7 0.8 2:54.53 python
1332 root 20 0 50000 3000 2424 S 0.3 0.0 0:12.04 vmtoolsd
2683 hbase 20 0 1927m 152m 18m S 0.3 2.0 0:36.64 java
Clue 2:
-bash-4.1$ ps -ef|grep 13766
root 13766 1850 99 16:01 ? 03:12:54 java -classpath /usr/share/cmf/lib/agent-4.6.3.jar com.cloudera.cmon.agent.DnsTest
Clue 3:
In cloudera-scm-agent.log:
[30/Aug/2013 16:01:58 +0000] 1850 Monitor-HostMonitor throttling_logger ERROR Timeout with args ['java', '-classpath', '/usr/share/cmf/lib/agent-4.6.3.jar', 'com.cloudera.cmon.agent.DnsTest']
None
[30/Aug/2013 16:01:58 +0000] 1850 Monitor-HostMonitor throttling_logger ERROR Failed to collect java-based DNS names
Traceback (most recent call last):
File "/usr/lib64/cmf/agent/src/cmf/monitor/host/dns_names.py", line 53, in collect
result, stdout, stderr = self._subprocess_with_timeout(args, self._poll_timeout)
File "/usr/lib64/cmf/agent/src/cmf/monitor/host/dns_names.py", line 42, in _subprocess_with_timeout
return SubprocessTimeout().subprocess_with_timeout(args, timeout)
File "/usr/lib64/cmf/agent/src/cmf/monitor/host/subprocess_timeout.py", line 70, in subprocess_with_timeout
raise Exception("timeout with args %s" % args)
Exception: timeout with args ['java', '-classpath', '/usr/share/cmf/lib/agent-4.6.3.jar', 'com.cloudera.cmon.agent.DnsTest']
"cloudera-scm-agent.log" line 30357 of 30357 --100%-- col 1
Background:
If I restart all nodes, then everything is OK, but after half an hour or more the nodes go into bad health one by one.
Version: Cloudera Standard 4.6.3 (#192 built by jenkins on 20130812-1221 git: fa61cf8559fbefeb5af7f223fd02164d1a0adfdb)
I added all nodes to /etc/hosts.
The installed CDH is 4.3.1.
In fact, these nodes are VMs with fixed IP addresses.
Any suggestions?
BTW, where can I download the source code of com.cloudera.cmon.agent.DnsTest?
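No definitive answer, but since every pegged java process in Clue 1 and Clue 2 is com.cloudera.cmon.agent.DnsTest, and Clue 3 shows the agent timing out while collecting DNS names, a hedged next step is to time name resolution on an affected node. The classpath is copied from Clue 2; <node-ip> is a placeholder for one of the VM addresses:

# Forward and reverse lookups should return instantly via /etc/hosts.
getent hosts "$(hostname -f)"
getent hosts <node-ip>

# Reproduce the agent's own check and see whether it hangs.
time java -classpath /usr/share/cmf/lib/agent-4.6.3.jar com.cloudera.cmon.agent.DnsTest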