DataMapper transaction doesn't roll back - Ruby

I have the following code:
Vhost.transaction do
  domains.each do |domain|
    unless domain.save
      errors << domain.errors
    end
  end
  unless vhost.save
    errors << vhost.errors
  end
end
I expect a rollback if any domain.save or vhost.save fails, but no rollback happens. What am I doing wrong?

I've had success with this pattern:
DataMapper::Model.raise_on_save_failure = true
MyModel.transaction do |t|
  begin
    # do stuff
  rescue DataMapper::SaveFailureError
    t.rollback
  end
end
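Note that with raise_on_save_failure set, you can also skip the rescue entirely: a failed save raises DataMapper::SaveFailureError, the exception escapes the block, and the transaction rolls back on its own. A minimal sketch of that variant, assuming the usual DataMapper behavior of rolling back when an exception leaves the transaction block:
Vhost.transaction do
  # each save raises DataMapper::SaveFailureError on failure;
  # the uncaught exception aborts the whole transaction
  domains.each { |domain| domain.save }
  vhost.save
end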
Edit
OK, so you want to keep a record of all the errors before rolling back; then try something like this:
Vhost.transaction do |t|
  new_errors = []
  domains.each do |domain|
    unless domain.save
      new_errors << domain.errors
    end
  end
  unless vhost.save
    new_errors << vhost.errors
  end
  errors += new_errors
  t.rollback if new_errors.any?
end
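The explicit t.rollback matters here: the failed saves are collected into new_errors instead of raising, so no exception ever escapes the block and nothing would abort the transaction on its own.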

Related

Ruby code not executed after method call

The code after the call to searchEmails(page) in the harvest method (the puts "hey") is never executed. I'm probably missing something simple with Ruby, because I'm just getting back into it.
def searchEmails(page_to_search)
  begin
    html = @agent.get(url).search('html').to_s
    mail = html.scan(/['.'\w|-]*@+[a-z]+[.]+\w{2,}/).map.to_a
    base = page_to_search.uri.to_s.split("//", 2).last.split("/", 2).first
    mail.each{|e| @file.puts e+";"+base unless e.include? "example.com" or e.include? "email.com" or e.include? "domain.com" or e.include? "company.com" or e.length < 9 or e[0] == "@"}
  end
end
def harvest(url)
  begin
    page = @agent.get(url)
    searchEmails(page)
    puts "hey"
  rescue Exception
  end
end
url = "www.example.com"
harvest(url)
@agent.get(url) will fail with a bad URL or a network outage.
The problem in your code could be reduced to the following:
def do_something
  begin
    raise
    puts "I will never get here!"
  rescue
  end
end
Since you can't get rid of the raise, you need to do something inside the rescue (most likely log it):
begin
  @agent.get(url)
  # ...
rescue Timeout::Error, Errno::EINVAL, Errno::ECONNRESET, EOFError,
       Net::HTTPBadResponse, Net::HTTPHeaderSyntaxError,
       Net::ProtocolError => e
  log(e.message, e.backtrace)
end
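log isn't defined anywhere in the question; as a stand-in (the name and behavior here are just an assumption for the sketch), it could append the message and backtrace to a file:
def log(message, backtrace)
  # hypothetical helper: append the error and its backtrace to a log file
  File.open("harvest_errors.log", "a") do |f|
    f.puts message
    f.puts backtrace.join("\n") if backtrace
  end
end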

Grabbing JSON data from API with multi-threaded requests

I'm using HTTParty to make requests and currently have the following code:
def scr(users)
  users.times do |id|
    test_url = "siteurl/#{id}"
    Thread.new do
      response = HTTParty.get(test_url)
      open('users.json', 'a') do |f|
        f.puts "#{response.to_json}, "
      end
      p "added"
    end
  end
  sleep
end
It works OK for 100-300 records.
I tried adding Thread.exit after sleep, but if I set users to something like 200,000, after a while my terminal throws an error. I don't remember exactly what it said, but it was something about threads... a resource being busy; only some of the records (about 10,000) were added successfully.
It looks like I'm doing it wrong and need to somehow break the requests into batches.
Update: here's what I got:
def scr(users)
  threads = []
  urls = []
  users.times do |id|
    test_url = "site_url/#{id}"
    urls << test_url
  end
  urls.each_slice(8) do |batch|
    batch.each do |t|
      threads << Thread.new do
        response = HTTParty.get(t)
        response.to_json
      end
    end
  end
  all_values = threads.map { |t| t.value }.join(', ')
  open('users.json', 'a') do |f|
    f.puts all_values
  end
end
On quick inspection, the problem would seem to be that you have a race condition on your JSON file. Even if you don't get an error, you'll definitely get corrupted data.
The simplest solution is probably just to do all the writing at the end:
def scr(users)
  threads = []
  users.times do |id|
    test_url = "siteurl/#{id}"
    threads << Thread.new do
      response = HTTParty.get(test_url)
      response.to_json
    end
  end
  all_values = threads.map { |t| t.value }.join(', ')
  open('users.json', 'a') do |f|
    f.puts all_values
  end
end
I wasn't able to test that, but it should do the trick. It's also better in general to use Thread#join or Thread#value instead of sleep.
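If 200,000 threads at once is still too much for your system, you can bound concurrency by joining each batch before starting the next, along the lines of your each_slice attempt. A sketch of that idea (the batch size of 8 is arbitrary, and siteurl is your placeholder):
def scr(users)
  results = []
  (0...users).each_slice(8) do |batch|
    threads = batch.map do |id|
      Thread.new { HTTParty.get("siteurl/#{id}").to_json }
    end
    # wait for this whole batch before spawning the next one
    results.concat(threads.map(&:value))
  end
  open('users.json', 'a') { |f| f.puts results.join(', ') }
end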

Skip to next host in array if an error is thrown

Say I have an array populated with IP addresses, and a method that tries to connect to each host in the array. In this particular instance I don't need to know any details about the failed connections; I just want to skip to the next host if one fails to connect or if ANY error is thrown. How do I do this?
The problem I'm having is that, occasionally, connecting to one of the hosts throws an ENETUNREACH error that kills the program. I tried solving it by just rescuing the error, but then the program simply stops executing without reporting anything. How do I get it to skip that host and move on to the next one?
def popen(host)
  addr = Socket.getaddrinfo(host, nil)
  sock = Socket.new(Socket.const_get(addr[0][0]), Socket::SOCK_STREAM, 0)
  begin
    sock.connect_nonblock(Socket.pack_sockaddr_in(22, addr[0][3]))
  rescue Errno::EINPROGRESS
    resp = IO.select(nil, [sock], nil, @timeout.to_i)
    if resp.nil?
      false
    end
    begin
      if sock.connect_nonblock(Socket.pack_sockaddr_in(22, addr[0][3]))
        sshlog(host, @user, @pass)
      end
    rescue Errno::ECONNREFUSED
      false
    end
  end
  sock
end
That is the connect method. And this:
def randIP
  begin
    threads = []
    if @arg1 =~ /^([1-9][0-9]{0,2}|1000)$/
      t1 = Time.now
      s = 1
      while s <= @arg1.to_i do
        @host_arr << Array.new(4){rand(254)}.join('.')
        s += 1
      end
      @host_arr.each do |ip|
        threads << Thread.new do
          begin
            popen(ip)
          rescue Errno::ENETUNREACH
          end
        end
      end
      threads.each do |thread|
        thread.join
      end
      t2 = Time.now
      time = t2 - t1
      STDOUT.puts "done"
      proxylst(@res_arr, @user, @pass)
    else
      puts "Host range too large! Must be a number between 1 and 1000."
    end
  rescue
    false
  end
end
is a method that generates random IP addresses, puts them into an array, and uses the popen method above to attempt a connection to each host. Given the nature of random IP generation, chances are that at least one host out of 1000 will be invalid or unreachable in some way. So what I need is a way to try every host in the array, skipping any that throw errors.
Typically, how I handle this is like so:
exception_array = []
connections.each do |con|
  begin
    # connection code
  rescue Exception => e
    exception_array << e
  end
end
unless exception_array.empty?
  msg = ""
  exception_array.each do |e|
    msg << e.to_s
  end
  raise msg
end
This way you keep track of all the exceptions that were thrown without aborting the loop on the first failure.
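If you truly don't care about the failures, rescuing inside the loop body is enough to move straight on to the next host; a minimal sketch against the popen method above:
@host_arr.each do |ip|
  begin
    popen(ip)
  rescue
    # swallow any error for this host; the loop simply
    # continues with the next address
    next
  end
end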

I must be misunderstanding Celluloid

I currently have a script written in Ruby that scans a range of IP addresses and tries to connect to them. It's extremely slow at the moment: it takes up to 300 seconds to scan 254 hosts on the network, which obviously isn't very practical. What I'm trying to do is give the script some concurrency in hopes of speeding it up. So far this is what I have:
require 'socket'
require 'celluloid'
$res_arr = []
class Ranger
  include Celluloid
  def initialize(host)
    @host = host
    @timeout = 1
  end
  def ip_range(host)
    host =~ /(?:\d{1,3}\.){3}[xX*]{1,3}/
  end
  def ctrl(host)
    begin
      if ip_range(host)
        strIP = host.gsub(/[xX*]/, '')
        (1..254).each do |oct|
          $res_arr << strIP + oct.to_s
        end
      else
        puts "Invalid host!"
      end
    rescue
      puts "Connection terminated."
    end
  end
  def connect
    addr = Socket.getaddrinfo(@host, nil)
    sock = Socket.new(Socket.const_get(addr[0][0]), Socket::SOCK_STREAM, 0)
    begin
      sock.connect_nonblock(Socket.pack_sockaddr_in(22, addr[0][3]))
    rescue Errno::EINPROGRESS
      resp = IO.select(nil, [sock], nil, @timeout.to_i)
      if resp.nil?
        $res_arr << "#{@host} Firewalled!"
      end
      begin
        if sock.connect_nonblock(Socket.pack_sockaddr_in(22, addr[0][3]))
          $res_arr << "#{@host} Connected!"
        end
      rescue Errno::ECONNREFUSED
        $res_arr << "#{@host} Refused!"
      rescue
        false
      end
    end
    sock
  end
  def output(contents)
    puts contents.value
  end
end # Ranger
main = Ranger.new(ARGV[0])
main.ctrl(ARGV[0])
$res_arr.each do |ip|
  scan = Ranger.new(ip)
  scnftr = scan.future :connect
  scan.output(scnftr)
end
The script works, but it takes just as long as before I included Celluloid at all. Am I misunderstanding how Celluloid works and what it's supposed to do?
Your problem is that each iteration of your loop starts a future, then immediately waits for it to return a value. What you want instead is to start all the futures first, then wait for all of them to finish, in two separate steps:
futures = $res_arr.map do |ip|
  scan = Ranger.new(ip)
  scan.future :connect
end
# now that all futures are running, we can start
# waiting for the first one to finish
futures.each do |future|
  puts future.value
end
Here's another example from the Celluloid source: https://github.com/celluloid/celluloid/blob/master/examples/simple_pmap.rb
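If you also want to cap how many connections are in flight at once, Celluloid's actor pools may help. This is only a sketch under two assumptions: that a pool accepts future calls the way a plain actor does, and a hypothetical connect_to(host) method that takes the host as an argument (pooled actors are all constructed alike, so the per-call host can't come from initialize):
pool = Ranger.pool(size: 16, args: [nil])  # at most 16 actors; the pooled @host is unused
futures = $res_arr.map { |ip| pool.future(:connect_to, ip) }
futures.each { |f| puts f.value }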

How to test that a rescue clause adds a line to an error log in RSpec

I have this Foo class.
class Foo
  def open_url
    Net::HTTP.start("google.com") do |http|
      # Do something
    end
  rescue Exception => e
    File.open("error.txt", "a+") { |f| f.puts e.message }
  end
end
And I want to test it with this RSpec spec:
require_relative 'foo'
describe Foo do
  describe "#open_url" do
    it "should put error log if connection fails" do
      Net::HTTP.stub(:start).and_return(Exception)
      # Check that a line is added to error.txt.
    end
  end
end
How can I check that a line has been added to error.txt?
You could read the file you are actually writing to:
describe Foo do
  describe "#open_url" do
    it "should put error log if connection fails" do
      Net::HTTP.stub(:start).and_raise
      Foo.new.open_url  # open_url is an instance method
      file = File.open("error.txt", "r")
      # ...
    end
  end
end
An alternative would be to check that the file is opened, along the lines of:
File.should_receive(:open).with("error.txt", "a+")
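Putting the first suggestion together into a full example that reads the file back (a sketch: it deletes error.txt up front so the assertion only sees the freshly written line, and the "boom" message is made up):
require_relative 'foo'
describe Foo do
  describe "#open_url" do
    it "logs the error message when the connection fails" do
      File.delete("error.txt") if File.exist?("error.txt")
      # force the rescue branch by making the HTTP call raise
      Net::HTTP.stub(:start).and_raise(StandardError.new("boom"))
      Foo.new.open_url
      File.read("error.txt").should include("boom")
    end
  end
end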
