Ruby daemons soft stop

This simple daemon (written with the Ruby daemons gem) prints numbers from 0 up to 9 until I stop the daemon using the stop command-line option:
require 'rubygems'
require 'daemons'

options = {
  :multiple => false,
  :ontop => false,
  :backtrace => true,
  :log_output => true,
  :monitor => false
}

Daemons.run_proc('test.rb', options) do
  loop do
    10.times do |i|
      puts "#{i}\n"
      sleep(1)
    end
  end
end
I start the daemon with
ruby simple_daemon.rb start
and stop it with
ruby simple_daemon.rb stop
Is it possible to stop the daemon softly, letting it finish its last loop before the process is killed, so that I'm sure it prints all 10 numbers one last time?

You need to trap the TERM signal, which is sent when you call stop, and handle it yourself. Your code could look something like this:
Daemons.run_proc('test.rb', options) do
  stopped = false
  Signal.trap("TERM") do
    stopped = true
  end

  until stopped
    10.times do |i|
      puts "#{i}\n"
      sleep(1)
    end
  end
end
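Putting the pieces together, the whole script would look something like this (an untested sketch that simply combines the question's options with the trap above):

require 'rubygems'
require 'daemons'

options = {
  :multiple => false,
  :ontop => false,
  :backtrace => true,
  :log_output => true,
  :monitor => false
}

Daemons.run_proc('test.rb', options) do
  stopped = false
  # The daemons gem delivers SIGTERM on `stop`; the handler only flips a
  # flag, so the current pass of the 10.times loop always finishes.
  Signal.trap("TERM") { stopped = true }

  until stopped
    10.times do |i|
      puts i
      sleep(1)
    end
  end
end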

Related

Ruby 3 collecting results from multiple scheduled fibers

Ruby 3 introduced Fiber.schedule to dispatch async tasks concurrently.
Similar to what's being asked in this question (which is about threaded concurrency), I would like a way to start multiple concurrent tasks on the fiber scheduler and, once they have all been scheduled, wait for their combined result, roughly the equivalent of Promise.all in JavaScript.
I can come up with this naive way:
require 'async'

def io_work(t)
  sleep t
  :ok
end

Async do
  results = []

  [0.1, 0.3, 'cow'].each_with_index do |t, i|
    n = i + 1
    Fiber.schedule do
      puts "Starting fiber #{n}\n"
      result = io_work t
      puts "Done working for #{t} seconds in fiber #{n}"
      results << [n, result]
    rescue
      puts "Execution failed in fiber #{n}"
      results << [n, :error]
    end
  end

  # await combined results
  sleep 0.1 until results.size >= 3
  puts "Results: #{results}"
end
Is there a simpler construct that will do the same?
Since Async tasks are already scheduled, I am not sure you need all of that.
If you just want to wait for all the items to finish, you can use an Async::Barrier.
Example:
require 'async'
require 'async/barrier'

def io_work(t)
  sleep t
  :ok
end

Async do
  barrier = Async::Barrier.new
  results = []

  [1, 0.3, 'cow'].each.with_index(1) do |data, idx|
    barrier.async do
      results << begin
        puts "Starting task #{idx}\n"
        result = io_work data
        puts "Done working for #{data} seconds in task #{idx}"
        [idx, result]
      rescue
        puts "Execution failed in task #{idx}"
        [idx, :error]
      end
    end
  end

  barrier.wait
  puts "Results: #{results}"
end
Based on the sleep values, this will output:
Starting task 1
Starting task 2
Starting task 3
Execution failed in task 3
Done working for 0.3 seconds in task 2
Done working for 1 seconds in task 1
Results: [[3, :error], [2, :ok], [1, :ok]]
The barrier.wait call waits until all the asynchronous tasks are complete; without it the output would look like:
Starting fiber 1
Starting fiber 2
Starting fiber 3
Execution failed in fiber 3
Results: [[3, :error]]
Done working for 0.3 seconds in fiber 2
Done working for 1 seconds in fiber 1
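As a side note, if you keep a handle on each task you can skip the shared results array entirely: in the async gem, Async { ... } returns a task whose wait method returns the block's value (and re-raises if the task failed). A minimal self-contained sketch, reusing the same io_work helper:

require 'async'

def io_work(t)
  sleep t
  :ok
end

Async do
  # Each nested Async block returns an Async::Task; wait returns its value.
  tasks = [1, 0.3].map { |t| Async { io_work(t) } }
  puts "Results: #{tasks.map(&:wait)}" # => Results: [:ok, :ok]
end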
I wasn't too happy with the ergonomics of that solution, so I made the fiber-collector gem to address it.
Disclaimer: I'm describing a library of which I am the author.
Example usage in the scenario from the question:
require 'async'
require 'fiber/collector'

def io_work(t)
  sleep t
  :ok
end

Async do
  Fiber::Collector.schedule { io_work(1) }.and { io_work(0.3) }.all
end.wait
# => [:ok, :ok]

Async do
  Fiber::Collector.schedule { io_work(1) }.and { io_work(0.3) }.and { io_work('cow') }.all
end.wait
# => raises error

Browsermob Proxy + Watir not capturing traffic continuously

I have BrowserMob Proxy set up correctly with Watir, and it is capturing traffic and saving the HAR file; however, it is not capturing the traffic continuously. This is what I'm trying to achieve:
Go to the homepage
Click on a link to go to another page, where I need to wait for some events to happen
Once on the second page, start capturing traffic after the event happens, wait for a specific call to occur, and capture its contents.
What I'm noticing, however, is that it follows all of the above steps, but at step 3 the proxy stops capturing traffic before that call is even made on the page. The returned HAR doesn't have that call in it, so the test fails before it even does its job. Here is what the code looks like:
class BMP
  attr_accessor :server, :proxy, :net_har, :sel_proxy

  def initialize
    bm_path = File.path(Support::Paths.cucumber_root + "/browsermob-proxy-2.1.4/bin/browsermob-proxy")
    @server = BrowserMob::Proxy::Server.new(bm_path, { :port => 9999, :log => false,
                                                       :use_little_proxy => true, :timeout => 100 })
    @server.start
    @proxy = @server.create_proxy
    @sel_proxy = @proxy.selenium_proxy
    @proxy.timeouts(:read => 50000, :request => 50000, :dns_cache => 50000)
    @net_har = @proxy.new_har("new_har", :capture_binary_content => true,
                              :capture_headers => true, :capture_content => true)
  end

  def fetch_har_entries(target_url)
    har_logs = File.join(Support::Paths.har_logs, "har_file_#{Time.now.strftime("%m%d%y_%H%M%S")}.har")
    @net_har.save_to har_logs
    index = 0
    while @net_har.entries.count > index do
      entry = @net_har.entries[index]
      if entry.request.url.include?(target_url) && entry.request.method.eql?("GET")
        logs = JSON.parse(entry.response.content.text) unless entry.response.content.text.nil?
        har_logs = File.join(Support::Paths.har_logs, "json_file_#{Time.now.strftime("%m%d%y_%H%M%S")}.json")
        File.open(har_logs, "w") do |json|
          json.write(logs)
        end
        break
      end
      index += 1
    end
  end
end
In my test file I have the following:

Then("I navigate to the homepage") do
  visit(HomePage) do |page|
    page.element.click
  end
end

And("I should wait for event to capture traffic") do
  visit(SecondPage) do |page|
    page.wait_until { page.element2.present? }
    BMP.fetch_har_entries("target/url")
  end
end
What am I missing that is causing the proxy to not capture traffic in its entirety?
In case anyone gets here from a Google search: I figured out how to resolve this on my own (thanks stackoverflow community for nothing, lol). To resolve the issue, I used a custom retriable loop called the eventually method.
logs = nil
eventually(timeout: 110, interval: 1) do
  # target_url and target_body_text come from the surrounding step definition
  @net_har = @proxy.new_har("har", capture_binary_content: true, capture_headers: true, capture_content: true)
  @net_har.entries.each do |entry|
    begin
      break if @net_har.entries.index(entry) == @net_har.entries.count
      next unless entry.request.url.include?(target_url) &&
                  entry.request.post_data.text.include?(target_body_text)
      logs = entry.request.post_data.text
      break
    rescue TypeError
      fail("Response body for the network call came back empty")
    end
  end
  raise EOFError if logs.nil?
end
logs
Basically, I'm assuming what was happening was that BMP would only cache or capture 30 seconds' worth of HAR logs, and if my network event didn't occur during those 30 seconds, I was SOL. So what the above code does is wait for the logs variable to become non-nil; if it is still nil, it raises an EOFError, goes back into the loop, initializes the HAR again, and looks for the network call again. It keeps doing that until it finds the call or the 110 seconds are up. Following is the eventually method I'm using:
def eventually(options = {})
  timeout = options[:timeout] || 30
  interval = options[:interval] || 0.1
  time_limit = Time.now + timeout
  loop do
    error = nil # reset each pass so a successful yield exits the loop
    begin
      yield
    rescue EOFError => error
    end
    return if error.nil?
    raise error if Time.now >= time_limit
    sleep interval
  end
end
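For what it's worth, the helper is generic; a minimal standalone exercise of it (the file name here is made up):

# Hypothetical usage: raise EOFError while the thing being polled for
# isn't there yet; eventually retries until it appears or time runs out.
eventually(timeout: 10, interval: 0.5) do
  raise EOFError unless File.exist?("capture.har")
end
puts "capture ready"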

How to properly implement Net::SSH port forwards

I have been trying to get port forwarding to work correctly with Net::SSH. From what I understand, I need to fork off the Net::SSH session if I want to be able to use it from the same Ruby program, so that the event-handling loop can actually process packets being sent through the connection. However, this results in the ugliness you can see in the following:
#!/usr/bin/env ruby -w

require 'net/ssh'
require 'httparty'
require 'socket'
require 'logger'

include Process

log = Logger.new(STDOUT)
log.level = Logger::DEBUG

local_port = 2006
child_socket, parent_socket = Socket.pair(:UNIX, :DGRAM, 0)
maxlen = 1000
hostname = "www.example.com"

pid = fork do
  parent_socket.close
  Net::SSH.start("hostname", "username") do |session|
    session.logger = log
    session.logger.sev_threshold = Logger::Severity::DEBUG
    session.forward.local(local_port, hostname, 80)
    child_socket.send("ready", 0)
    pidi = fork do
      msg = child_socket.recv(maxlen)
      puts "Message from parent was: #{msg}"
      exit
    end
    session.loop do
      status = waitpid(pidi, Process::WNOHANG)
      puts "Status: #{status.inspect}"
      status.nil?
    end
  end
end

child_socket.close
puts "Message from child: #{parent_socket.recv(maxlen)}"
resp = HTTParty.post("http://localhost:#{local_port}/", :headers => { "Host" => hostname })
# the write cannot be the last statement, otherwise the child pid could end up
# not receiving it
parent_socket.write("done")
puts resp.inspect
Can anybody show me a more elegant/better working solution to this?
I spent a lot of time trying to figure out how to correctly implement port forwarding, then took inspiration from the net/ssh/gateway library. I needed a robust solution that works after various possible connection errors. This is what I'm using now; hope it helps:
require 'net/ssh'

ssh_options = ['host', 'login', :password => 'password']
tunnel_port = 2222

begin
  run_tunnel_thread = true
  tunnel_mutex = Mutex.new

  ssh = Net::SSH.start *ssh_options
  tunnel_thread = Thread.new do
    begin
      while run_tunnel_thread do
        tunnel_mutex.synchronize { ssh.process 0.01 }
        Thread.pass
      end
    rescue => exc
      puts "tunnel thread error: #{exc.message}"
    end
  end

  tunnel_mutex.synchronize do
    ssh.forward.local tunnel_port, 'tunnel_host', 22
  end

  begin
    ssh_tunnel = Net::SSH.start 'localhost', 'tunnel_login', :password => 'tunnel_password', :port => tunnel_port
    puts ssh_tunnel.exec! 'date'
  rescue => exc
    puts "tunnel connection error: #{exc.message}"
  ensure
    ssh_tunnel.close if ssh_tunnel
  end

  tunnel_mutex.synchronize do
    ssh.forward.cancel_local tunnel_port
  end
rescue => exc
  puts "tunnel error: #{exc.message}"
ensure
  run_tunnel_thread = false
  tunnel_thread.join if tunnel_thread
  ssh.close if ssh
end
That's just how SSH in general is. If you're offended by how ugly it looks, you should probably wrap that functionality up in a port-forwarding class of some sort so that the exposed part is a lot more succinct. An interface like this, perhaps:
forwarder = PortForwarder.new(8080, 'remote.host', 80)
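A rough sketch of what such a wrapper might look like (a hypothetical, untested class; like the threaded answer above, it pumps the session's event loop in a background thread):

require 'net/ssh'

# Hypothetical wrapper around Net::SSH local forwarding: owns the
# session and pumps its event loop in a background thread.
class PortForwarder
  def initialize(local_port, remote_host, remote_port, ssh_host, ssh_user)
    @session = Net::SSH.start(ssh_host, ssh_user)
    @session.forward.local(local_port, remote_host, remote_port)
    @running = true
    @pump = Thread.new { @session.process(0.1) while @running }
  end

  # Stop pumping the event loop and tear the connection down.
  def close
    @running = false
    @pump.join
    @session.close
  end
end

forwarder = PortForwarder.new(8080, 'remote.host', 80, 'ssh.host', 'user')
# ... talk to http://localhost:8080 here ...
forwarder.close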
So I have found a slightly better implementation. It only requires a single fork but still uses a socket for the communication. It uses IO#read_nonblock to check whether a message is ready: if there isn't one, the method raises an exception, in which case the block continues to return true and the SSH session keeps serving requests. Once the parent is done with the connection, it sends a message, which causes child_socket.read_nonblock(maxlen).nil? to return false, making the loop exit and therefore shutting down the SSH connection.
I feel a little better about this, so between that and @tadman's suggestion to wrap it in a port-forwarding class, I think it's about as good as it'll get. However, any further suggestions for improving this are most welcome.
#!/usr/bin/env ruby -w

require 'net/ssh'
require 'httparty'
require 'socket'
require 'logger'

log = Logger.new(STDOUT)
log.level = Logger::DEBUG

local_port = 2006
child_socket, parent_socket = Socket.pair(:UNIX, :DGRAM, 0)
maxlen = 1000
hostname = "www.example.com"

pid = fork do
  parent_socket.close
  Net::SSH.start("ssh-tunnel-hostname", "username") do |session|
    session.logger = log
    session.logger.sev_threshold = Logger::Severity::DEBUG
    session.forward.local(local_port, hostname, 80)
    child_socket.send("ready", 0)
    session.loop { child_socket.read_nonblock(maxlen).nil? rescue true }
  end
end

child_socket.close
puts "Message from child: #{parent_socket.recv(maxlen)}"
resp = HTTParty.post("http://localhost:#{local_port}/", :headers => { "Host" => hostname })
# the write cannot be the last statement, otherwise the child pid could end up
# not receiving it
parent_socket.write("done")
puts resp.inspect

Gem Daemons - How to run several different daemons

Basically I just want to run several daemons in my Ruby script:
require 'daemons'
Daemons.run path_1, { :ARGV => ['start'], :app_name => 'app1', :multiple => true, ... }
Daemons.run path_2, { :ARGV => ['start'], :app_name => 'app2', :multiple => true, ... }
But the second Daemons.run is never called when ARGV[0] == 'start' (it works perfectly with 'status'/'stop'). What is the right way to do this?
From http://daemons.rubyforge.org:
3- Control a bunch of daemons from another application
Layout: you have an application my_app.rb that wants to run a bunch of server tasks as daemon processes.
# this is my_app.rb
require 'rubygems' # if you use RubyGems
require 'daemons'

task1 = Daemons.call(:multiple => true) do
  # first server task
  loop {
    conn = accept_conn()
    serve(conn)
  }
end

task2 = Daemons.call do
  # second server task
  loop {
    something_different()
  }
end

# the parent process continues to run
# we can even control our tasks, for example stop them
task1.stop
task2.stop
exit
Does it fit? Note that Daemons.run takes over the current process when started with 'start': the controlling process daemonizes and exits, so your second call is never reached. Daemons.call, by contrast, forks a daemonized child for each block and returns control to your script.
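Applied to the question, that would look something like this (a sketch; load is just one hypothetical way to execute the scripts at path_1 and path_2 inside the blocks):

require 'daemons'

# Daemons.call forks a daemonized child per block and returns a handle,
# so both calls execute and the parent script keeps running.
app1 = Daemons.call(:app_name => 'app1', :multiple => true) { load path_1 }
app2 = Daemons.call(:app_name => 'app2', :multiple => true) { load path_2 }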

Disposing of a thread in ruby or jruby

I have a RabbitMQ queue subscriber that spins up a new thread every time a message is consumed:
n = 1 # assumption: the question uses n below without showing where it's initialized

AMQP.start(@conf) do |connection|
  channel = AMQP::Channel.new(connection)
  requests_queue = channel.queue("one")
  requests_queue.subscribe(:ack => true) do |header, body|
    puts "we have a message at #{Time.now} and n is #{n}"
    url_search = MultiJson.decode(body)
    Thread.new do
      5.times do
        lead = get_lead(n, (n == 5))
        puts "message #{n} is_last = #{lead.is_last} at #{Time.now}"
        AMQP::Exchange.default.publish(
          MultiJson.encode(lead),
          :routing_key => header.reply_to,
          :correlation_id => header.correlation_id
        )
        n += 1
        sleep(2)
      end
    end
  end
end
My question is: how do I dispose of the thread after the message is handled? Should I be using a thread pool?
I am using JRuby. Does the above code create a Java JVM thread behind the scenes using the normal Ruby syntax, or should I be explicitly creating a Java thread?
You don't have to manually dispose of the thread, I think, and you should be using Ruby threads: from what I gather they are Java threads in JRuby, which is where JRuby gets its nice performance.
A common thing to do is to spin up a couple of threads and then join all of them before continuing, if you want to be sure they have all completed before the next step, though that doesn't seem to be required here (see the sketch below).
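A minimal sketch of that join pattern:

# Spawn a few workers, then block until every one has finished.
threads = 5.times.map do |i|
  Thread.new do
    sleep(rand)
    puts "worker #{i} done"
  end
end
threads.each(&:join)
puts "all workers complete"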
Here's a little test program:
# foo.rb
a = Thread.new { print "a"; sleep(1); print "b"; print "c" }
require 'pp'
pp Thread.list
puts "foo"
sleep(2);
pp Thread.list
puts "bar"
As you can see, the spawned background thread is automatically removed. (Tested in JRuby as well as 1.9.2.)
$ ruby foo.rb
a[#<Thread:0x00000100887678 run>, #<Thread:0x0000010086c7d8 sleep>]
foo
bc[#<Thread:0x00000100887678 run>]
bar
