Using Ruby to get email attachment ID

I am trying to loop through a set of emails, and get their email attachment ID. I can't seem to get down to the ID, though.
I've gotten this far:
class AccessEmailController < ApplicationController
def access_email
client = Signet::OAuth2::Client.new(access_token: session[:access_token])
service = Google::Apis::GmailV1::GmailService.new
service.authorization = client
user_id = 'me'
email_list = service.list_user_messages('me', q: "subject:(CCC Data)")
message = 'No Emails Found' if email_list.messages.empty?
email_list.messages.each do |message|
message_id = message.id
@subject = service.get_user_message(user_id, message_id).payload.parts
end
end
end
And if I display @subject in a view, I get:
[#<Google::Apis::GmailV1::MessagePart:0x00007f8630c397a0 @headers=[#<Google::Apis::GmailV1::MessagePartHeader:0x00007f8630c344a8 @name="Content-Type", @value="multipart/alternative; boundary=\"00000000000020396d05b7ceda38\"">], @mime_type="multipart/alternative", @body=#<Google::Apis::GmailV1::MessagePartBody:0x00007f8630c39250 @size=0>, @filename="", @part_id="0", @parts=[#<Google::Apis::GmailV1::MessagePart:0x00007f8630c30588 @headers=[#<Google::Apis::GmailV1::MessagePartHeader:0x00007f8630c2d2e8 @name="Content-Type", @value="text/plain; charset=\"UTF-8\"">], @mime_type="text/plain", @body=#<Google::Apis::GmailV1::MessagePartBody:0x00007f8630c2ed50 @size=91, @data="-- \r\nKim Floyd (Davis)\r\n\r\nWeb Developer, EvenVision\r\n707.845.3073\r\nk.davis@evenvision.com\r\n">, @filename="", @part_id="0.0">, #<Google::Apis::GmailV1::MessagePart:0x00007f8630c2be98 @headers=[#<Google::Apis::GmailV1::MessagePartHeader:0x00007f8630c2a840 @name="Content-Type", @value="text/html; charset=\"UTF-8\"">, #<Google::Apis::GmailV1::MessagePartHeader:0x00007f8630c29ff8 @name="Content-Transfer-Encoding", @value="quoted-printable">], @mime_type="text/html", @body=#<Google::Apis::GmailV1::MessagePartBody:0x00007f8630c2b858 @size=344, @data="<div dir=\"ltr\"><br clear=\"all\"><div><br></div>-- <br><div dir=\"ltr\" class=\"gmail_signature\" data-smartmail=\"gmail_signature\"><div dir=\"ltr\">Kim Floyd (Davis)<div><br><div>Web Developer, EvenVision</div><div>707.845.3073</div><div>k.davis@evenvision.com</div></div></div></div></div>\r\n">, @filename="", @part_id="0.1">]>, #<Google::Apis::GmailV1::MessagePart:0x00007f8630c28f68 @headers=[#<Google::Apis::GmailV1::MessagePartHeader:0x00007f8630c27b90 @name="Content-Type", @value="image/png; name=\"image.png\"">, #<Google::Apis::GmailV1::MessagePartHeader:0x00007f8630c26fd8 @name="Content-Disposition", @value="attachment; filename=\"image.png\"">, #<Google::Apis::GmailV1::MessagePartHeader:0x00007f8630c266f0 @name="Content-Transfer-Encoding", @value="base64">, #<Google::Apis::GmailV1::MessagePartHeader:0x00007f8630c25d40 @name="X-Attachment-Id", @value="f_kjdr80n00">, #<Google::Apis::GmailV1::MessagePartHeader:0x00007f8630c25250 @name="Content-ID", @value="<f_kjdr80n00>">], @mime_type="image/png", @body=#<Google::Apis::GmailV1::MessagePartBody:0x00007f8630c28a90 @size=459005, @attachment_id="ANGjdJ9LKnYW-GChtR3_oaBWnwi0DjHG4I8ZeDuMskuJ6htxLGmwNaB-v3x0k_cP94ybJFg0HLPO0AkSLWb-zXvF25qaGyhA20R89kwPSC9j3ttVEsBhByEAxrPkJQVwKuf3H8EblPhNNfPA7iYEJ7zT56do8-_UbMbDuXo-fVU445mtPHSYHWOEX_4z8aUyNTU3MCzNY8EHz90mJRpJnseelK3whaZdcJ2E98XmxQ">, @filename="image.png", @part_id="1">]
Which does contain the attachment I am looking for. How can I get down to that attachment, though, and get its ID?

email_list.messages.each do |msg|
  message = service.get_user_message(user_id, msg.id) # fetch the full message (service is the authorized GmailService from the question)
  parts = message.payload.parts || []                 # a message may have no parts at all
  parts.each do |part|
    # Only the part that carries the attachment has an attachment_id;
    # inline text/html parts leave it nil.
    p part.body.attachment_id
  end
end
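Once you have the attachment_id, the same authorized service can download the attachment itself with get_user_message_attachment. A minimal sketch; writing the bytes to part.filename via File.binwrite is an illustrative assumption, not something from the question:

email_list.messages.each do |msg|
  message = service.get_user_message(user_id, msg.id)
  (message.payload.parts || []).each do |part|
    next unless part.body && part.body.attachment_id # skip inline parts
    attachment = service.get_user_message_attachment(user_id, msg.id, part.body.attachment_id)
    # attachment is a MessagePartBody; the Ruby client decodes the
    # base64url payload, so attachment.data holds the file's raw bytes.
    File.binwrite(part.filename, attachment.data)
  end
end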

Related

Discord.py, member.status isn't working as it should

@bot.tree.command(name="user", description="Shows some information about the mentioned user.")
async def user(interaction: discord.Interaction, member:discord.Member=None):
if member == None:
member = interaction.user
roles = [role for role in member.roles if role.name != "#everyone"]
embed = discord.Embed(title=f"Details about the user, {member.name}",color=0xdaddd8, timestamp = datetime.datetime.utcnow())
embed.set_thumbnail(url=member.avatar) # Avatar Thumbnail
embed.add_field(name="👤 Name", value = f"{member.name}#{member.discriminator}") # Embeds
embed.add_field(name="🏷️ Nickname", value = member.display_name)
embed.add_field(name="🆔 User ID", value = member.id)
embed.add_field(name="📆 Created at", value = member.created_at.strftime("%D \n%I:%M %p"))
embed.add_field(name="👋🏻 Joined at", value = member.joined_at.strftime("%D \n%I:%M %p"))
embed.add_field(name="🟢 Status", value = member.status) # this line isn't working as it should
embed.add_field(name="❤️‍🔥 Top role", value = member.top_role.mention)
bot_status = "Yes, it is" if member.bot else "No, They'snt"
embed.add_field(name="🤖 Bot?", value = bot_status)
embed.set_footer(text = interaction.user.name,icon_url = interaction.user.avatar)
await interaction.response.send_message(embed=embed)
I made a user information command, but it shows every member as offline, even the bot itself. How can I fix it?
This is likely due to a lack of intents.
Add this in your code, where you define your intents:
intents.members = True
intents.presences = True
Use member.raw_status instead of member.status. It will return a string value such as 'online', 'offline' or 'idle'.

How to exclude holidays from the count in Rails 4

I am trying to make an app for vacation requests, but I am facing a problem. I have a model called VacationRequest and a view of VacationRequest where the result will be shown.
VacationRequest.rb model
def skip_holidays
count1 = 0
special_days = Date.parse("2017-05-09", "2017-05-12")
special_days.each do |sd|
if ((start_data..end_data) === sd)
numero = (end_data - start_data).to_i
numro1 = numero - sd
else
numero = (end_data - start_data).to_i
end
end
end
VacationRequest show.html.erb, where I call the method from the model:
@vacation_request.skip_holidays
This is showing errors and is not working. Please help me with that!
My approach to this would be the following:
def skip_holidays
  special_days = ["2017-05-09", "2017-05-12"].map(&:to_date)
  accepted_days = []
  refused_days = []
  (start_data..end_data).each do |requested_date|
    if special_days.include?(requested_date)
      refused_days << requested_date
    else
      accepted_days << requested_date
    end
  end
  # return both counts instead of only the last assignment
  [accepted_days.count, refused_days.count]
end
This way you iterate over all requested dates (the range) and check whether each date is a special day: if so, refuse it, otherwise accept it.
In the end you can display statistics about accepted and refused dates.
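For something more compact, Ruby's Enumerable#partition does the same split in one pass. A sketch; returning the two counts as an array is my own choice here:

def skip_holidays
  special_days = ["2017-05-09", "2017-05-12"].map(&:to_date)
  # partition returns [dates matching the block, all the others]
  refused_days, accepted_days = (start_data..end_data).partition { |d| special_days.include?(d) }
  [accepted_days.count, refused_days.count]
end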
You cannot create a date range by passing multiple arguments to Date.parse. Instead, call it twice and build a range from the two results:
special_days = Date.parse("2017-05-09") .. Date.parse("2017-05-12")
Or if instead you want only the two dates, do:
special_days = ["2017-05-09", "2017-05-12"].map &:to_date
Date.parse("2017-05-09", "2017-05-12") doesn't create a range; it only returns the first argument parsed:
#irb
Date.parse("2017-05-09", "2017-05-12")
=> Tue, 09 May 2017
You can do it this way:
initial = Date.parse("2017-05-09")
final = Date.parse("2017-05-12")
(initial..final).each do |date|
  # rules here.
end

For loop while using scrapy

I am trying to crawl a website for different dates, so I am storing the dates in a list. But while trying to access the items of the list, the crawler works only for the first value in the list. Please help. The following is my code:
class SpidyQuotesViewStateSpider(scrapy.Spider):
name = 'retail_price'
def start_requests(self):
print "start request"
urls = "http://fcainfoweb.nic.in/PMSver2/Reports/Report_Menu_web.aspx"
yield scrapy.Request(url=urls, callback=self.parse)
def parse(self, response):
dated = ["05/03/2017","04/03/2017"]
urls = "http://fcainfoweb.nic.in/PMSver2/Reports/Report_Menu_web.aspx"
#frmdata =
cookies1 ={}
val = response.headers.getlist('Set-Cookie')
print "login session values" ,response.headers.getlist('Set-Cookie')
if(len(val) != 0):
cookies1['ASP.NET_SessionId'] = str(response.headers.getlist('Set-Cookie')[0].split(";")[0].split("=")[1])
cookies1['path'] = str(response.headers.getlist('Set-Cookie')[0].split(";")[1].split("=")[1])
print cookies1;
for i in range(len(dated)):
yield scrapy.FormRequest(url=urls, callback=self.parse1, formdata={'ctl00$MainContent$btn_getdata1':"Get Data",
'ctl00$MainContent$Txt_FrmDate':dated[i],
'ctl00$MainContent$Ddl_Rpt_Option0':"Daily Prices",
'ctl00$MainContent$Rbl_Rpt_type':"Price report",
'ctl00$MainContent$ddl_Language':"English",
'ctl00$MainContent$Ddl_Rpt_type':"Retail",
'__EVENTVALIDATION':"IwZyKgfTXVzxiHxiPXGk/W8XQZBDb0EOPxJh6s8hofq0ffqOpiHSH77CafcxySF3PbkYgSMNFCJhLM2cGnL6SxT0PJuGDCJtV0V8Y4a94UErUCiSANiin+4uKckk9v9Ux8JqTVeaipppmlH+wyks2U9SgPfkNUsqw4eHCkDyB5akNNZImRIixOHHVY3JSXGkwXn7ueK9w+AgnqJzpXaWdMr9J1++M4VAFImSNF8brFSfPHe5kb/qzkGIwUr/KRouaRYK8WLWZh/Mbl9xwREwhDSxWJSOdihSE0WWoaqSMtpaR99rDDCsD3mdJqfu0aPIlREupTZRzlrmztXU0eS3949YW+ywdTRvykaMNgOW2Q4saYP5j/niKbRW6GiDnaLV2A38X/HW80+trrsjwJr9tjTKVFyikf6s/3gzyiTp11ivSkwIY2b3hutjYn7OfTDo",
#'__EVENTVALIDATION':"HqVo2xHk04clYwnBposXbZGhbIr181A7RbyeZv74Cia7rXSKmpOpbeSnn3XXnoDJKRxMK0W9nxKZFfkNje+P/K7gE5HVjHJr9Gr0Gs46TntzKDsvzyii8jZ7e0fdZgQCJKoXxQNgR2vNkWqChKcEldBuMHCOgJRqCNCF/JPFKpdKZoIWr7GU8rhzwLijf/Gkm+FuTULs/fl2HHK6Z1QQEozzEHFsDwzl0G4IiN//eNYfHuUBXKZ3wdZzPqG0s53WHEuSBzhqBC9AtCJOs4ZZhdtwFh8iyTJ4PlsLP9DLHYHRCOAd72UO0UH8gT7gAkKVo1I4L540DilowOR9SttH7MM/oOs9qhKlnG61FgqkYGW8zGzF/yNEXO+beVAK1RVvuO+FDnuq/g36TRnUieei5GpAZ+96CSoCIxykdvHx8R+smTNF/5erlowV4ci+tcI7",
'__VIEWSTATEENCRYPTED':"",
'__VIEWSTATEGENERATOR':"85862B00",
'__VIEWSTATE':"+a+3jrBEKxDdkPOzx2wXwKaTMWvCB60WPaHRfJUAZQrdFIpxSqFr5VseTclpGzeHXdxaFnxJe/PkxDKYa7sj3Wiv/os1bNeX0IEB3s45eFsHYWGiU8cvsXCGa5z7rrGRDL5hotg7k/MuUWj8w27xXZO423MN5OsHS+wh+tC/5/Xix+w3zxuQhi8jR5DnreimHbhGZn1sYaKYIGCc8mDIDRNl+w1OZ058F+3LAx96QUu5BYiMYOmrlyxrb9b2yPTmmIrI4NtC4ClBQlxuST5wMDP3vUqqWMhn4auk8ev5gHyPestCRrsAXWs07wDNnikemMwo/4wPiTEbnZQV6SLcDUw0gZpXjXwLI7mhsVjEyVNaQnJp6+Wi6FLsAEEMlFYmQut3JecpVIUkjF9uYSN2GLIbXHPs37AiEXPeQ8E/GyBMx3z1X5l8sw/xSNmFgYQC3riajn8V0+SdkuV2PbNbYKtc+uoSCNLppLYCqiOv5eWanGvAQro2Q67FBA4w2xY+V/K8mzHaGMLoDBxJxLslWyJpL5cX0C6qoXVUu8B028auAQM4eVzH1YPF5qrJiCDo",
#'__VIEWSTATE':"W+m8kNAS6QHiRPo+zFj00EDs/Dbq+y/XvtCmSNwOIkGKlikAlphT8HBAWQDskSm1vdNterBuo0Hy7m4xPbXMOnyEm6IlseXO3jPw+ofnI2WHAKknLil+GeS0IfMWGeoD5aNyiz3zh1jZkKU7R7hQsxwARoHRyjhf8UCooFbkVvL6ddHVYZbH5LcocmCF1BTOCqYN5y5yzfDfYbp3KNW9kH53pdmwCsjiEirdxxUGDoG1Ke3JBEXfSl+4XubirHSR8z+VlFmPPXZGU8mMogwq9Eg822RYjvbwvZG74djcf7kdfB9KXCPO9u6cWIjLiW+cfXHSXD+1XYFVf9ATU2/NV4YbUzsI4PJRwoGD4BryUNIm2JFeT4c8F4REYTA16shxz5mDTFQ6rbmg6SmqP8G9gAc2Hr9ABD8+2BUNabGhNZ8wDIZArfYS4pl5DNrlPlpqeCjhmvv0znKAJSOac3pCUej8G90ZGwQKOPORWbNVzQShoH7QvrXV8pCklcia6psuAGO+Oj72oDWPxedE4DjdjX5TbLoW4bzsk/YNfUv4JpjGR8DWpG8IFYJG9CCjMEYb",
'__LASTFOCUS':"",
'__EVENTARGUMENT':"",
'__EVENTTARGET':"",
'ctl00_MainContent_ToolkitScriptManager1_HiddenField':";;AjaxControlToolkit,+Version=4.1.51116.0,+Culture=neutral,+PublicKeyToken=28f01b0e84b6d53e:en-US:fd384f95-1b49-47cf-9b47-2fa2a921a36a:475a4ef5:addc6819:5546a2b:d2e10b12:effe2a26:37e2e5c9:5a682656:c7029a2:e9e598a9"},method='POST',cookies = cookies1)
def parse1(self, response):
path1 = "id('Panel1')"
value1 = response.xpath(path1).extract_first()
print value1
First of all, you are sending the spider to the same site multiple times, though with different form parameters. You therefore have to use dont_filter=True in the request, otherwise Scrapy filters out the duplicate calls.
Then it seems to me that the site you are scraping doesn't allow you to make more than one request during the same session. Try, for example, going to http://fcainfoweb.nic.in/PMSver2/Reports/Report_Menu_web.aspx in your browser, filling in the form, getting the data and then going back to the initial page: it's impossible. So you have to modify your spider. Here's some very rough code just to give the idea. It works for me, but please don't use it in production!
class SpidyQuotesViewStateSpider(scrapy.Spider):
name = 'retail_price'
urls = "http://fcainfoweb.nic.in/PMSver2/Reports/Report_Menu_web.aspx"
def start_requests(self):
dated = ["01/03/2017","05/03/2017","04/03/2017"]
for i in dated:
request = scrapy.Request(url=self.urls, dont_filter=True, callback=self.parse)
request.meta['question'] = i
yield request
def parse(self, response):
thedate = response.meta['question']
cookies1 ={}
val = response.headers.getlist('Set-Cookie')
print("login session values" ,response.headers.getlist('Set-Cookie'))
if(len(val) != 0):
cookies1['ASP.NET_SessionId'] = str(str(response.headers.getlist('Set-Cookie')[0]).split(";")[0].split("=")[1])
cookies1['path'] = str(str(response.headers.getlist('Set-Cookie')[0]).split(";")[1].split("=")[1])
yield scrapy.FormRequest(url=self.urls, dont_filter=True, callback=self.parse1, formdata={'ctl00$MainContent$btn_getdata1':"Get Data",
'ctl00$MainContent$Txt_FrmDate': thedate,
'ctl00$MainContent$Ddl_Rpt_Option0':"Daily Prices",
'ctl00$MainContent$Rbl_Rpt_type':"Price report",
'ctl00$MainContent$ddl_Language':"English",
'ctl00$MainContent$Ddl_Rpt_type':"Retail",
'__EVENTVALIDATION':"IwZyKgfTXVzxiHxiPXGk/W8XQZBDb0EOPxJh6s8hofq0ffqOpiHSH77CafcxySF3PbkYgSMNFCJhLM2cGnL6SxT0PJuGDCJtV0V8Y4a94UErUCiSANiin+4uKckk9v9Ux8JqTVeaipppmlH+wyks2U9SgPfkNUsqw4eHCkDyB5akNNZImRIixOHHVY3JSXGkwXn7ueK9w+AgnqJzpXaWdMr9J1++M4VAFImSNF8brFSfPHe5kb/qzkGIwUr/KRouaRYK8WLWZh/Mbl9xwREwhDSxWJSOdihSE0WWoaqSMtpaR99rDDCsD3mdJqfu0aPIlREupTZRzlrmztXU0eS3949YW+ywdTRvykaMNgOW2Q4saYP5j/niKbRW6GiDnaLV2A38X/HW80+trrsjwJr9tjTKVFyikf6s/3gzyiTp11ivSkwIY2b3hutjYn7OfTDo",
#'__EVENTVALIDATION':"HqVo2xHk04clYwnBposXbZGhbIr181A7RbyeZv74Cia7rXSKmpOpbeSnn3XXnoDJKRxMK0W9nxKZFfkNje+P/K7gE5HVjHJr9Gr0Gs46TntzKDsvzyii8jZ7e0fdZgQCJKoXxQNgR2vNkWqChKcEldBuMHCOgJRqCNCF/JPFKpdKZoIWr7GU8rhzwLijf/Gkm+FuTULs/fl2HHK6Z1QQEozzEHFsDwzl0G4IiN//eNYfHuUBXKZ3wdZzPqG0s53WHEuSBzhqBC9AtCJOs4ZZhdtwFh8iyTJ4PlsLP9DLHYHRCOAd72UO0UH8gT7gAkKVo1I4L540DilowOR9SttH7MM/oOs9qhKlnG61FgqkYGW8zGzF/yNEXO+beVAK1RVvuO+FDnuq/g36TRnUieei5GpAZ+96CSoCIxykdvHx8R+smTNF/5erlowV4ci+tcI7",
'__VIEWSTATEENCRYPTED':"",
'__VIEWSTATEGENERATOR':"85862B00",
'__VIEWSTATE':"+a+3jrBEKxDdkPOzx2wXwKaTMWvCB60WPaHRfJUAZQrdFIpxSqFr5VseTclpGzeHXdxaFnxJe/PkxDKYa7sj3Wiv/os1bNeX0IEB3s45eFsHYWGiU8cvsXCGa5z7rrGRDL5hotg7k/MuUWj8w27xXZO423MN5OsHS+wh+tC/5/Xix+w3zxuQhi8jR5DnreimHbhGZn1sYaKYIGCc8mDIDRNl+w1OZ058F+3LAx96QUu5BYiMYOmrlyxrb9b2yPTmmIrI4NtC4ClBQlxuST5wMDP3vUqqWMhn4auk8ev5gHyPestCRrsAXWs07wDNnikemMwo/4wPiTEbnZQV6SLcDUw0gZpXjXwLI7mhsVjEyVNaQnJp6+Wi6FLsAEEMlFYmQut3JecpVIUkjF9uYSN2GLIbXHPs37AiEXPeQ8E/GyBMx3z1X5l8sw/xSNmFgYQC3riajn8V0+SdkuV2PbNbYKtc+uoSCNLppLYCqiOv5eWanGvAQro2Q67FBA4w2xY+V/K8mzHaGMLoDBxJxLslWyJpL5cX0C6qoXVUu8B028auAQM4eVzH1YPF5qrJiCDo",
#'__VIEWSTATE':"W+m8kNAS6QHiRPo+zFj00EDs/Dbq+y/XvtCmSNwOIkGKlikAlphT8HBAWQDskSm1vdNterBuo0Hy7m4xPbXMOnyEm6IlseXO3jPw+ofnI2WHAKknLil+GeS0IfMWGeoD5aNyiz3zh1jZkKU7R7hQsxwARoHRyjhf8UCooFbkVvL6ddHVYZbH5LcocmCF1BTOCqYN5y5yzfDfYbp3KNW9kH53pdmwCsjiEirdxxUGDoG1Ke3JBEXfSl+4XubirHSR8z+VlFmPPXZGU8mMogwq9Eg822RYjvbwvZG74djcf7kdfB9KXCPO9u6cWIjLiW+cfXHSXD+1XYFVf9ATU2/NV4YbUzsI4PJRwoGD4BryUNIm2JFeT4c8F4REYTA16shxz5mDTFQ6rbmg6SmqP8G9gAc2Hr9ABD8+2BUNabGhNZ8wDIZArfYS4pl5DNrlPlpqeCjhmvv0znKAJSOac3pCUej8G90ZGwQKOPORWbNVzQShoH7QvrXV8pCklcia6psuAGO+Oj72oDWPxedE4DjdjX5TbLoW4bzsk/YNfUv4JpjGR8DWpG8IFYJG9CCjMEYb",
'__LASTFOCUS':"",
'__EVENTARGUMENT':"",
'__EVENTTARGET':"",
'ctl00_MainContent_ToolkitScriptManager1_HiddenField':";;AjaxControlToolkit,+Version=4.1.51116.0,+Culture=neutral,+PublicKeyToken=28f01b0e84b6d53e:en-US:fd384f95-1b49-47cf-9b47-2fa2a921a36a:475a4ef5:addc6819:5546a2b:d2e10b12:effe2a26:37e2e5c9:5a682656:c7029a2:e9e598a9"},method='POST',cookies = cookies1)
def parse1(self, response):
path1 = "id('Panel1')"
value1 = response.xpath(path1).extract_first()[:574]
print(value1)

Building Data Structure out of JSON output

Hi, I'm working on a script that pulls data out of a ticketing system. Once it pulls the data, it analyzes the content, and if the content matches specific criteria it builds a data structure file that will be dumped on the same server.
I was able to parse the data into JSON format; listed below is the content:
[{"id"=>10423,
"type"=>"Ticket",
"lastUpdated"=>"2014-11-04T10:58:47Z",
"shortSubject"=>"FOO STATUS UPDATE",
"shortDetail"=>"Reply to this message if all systems are functional..",
"displayClient"=>"No Client",
"updateFlagType"=>0,
"prettyLastUpdated"=>"54 minutes ago",
"latestNote"=>
{"id"=>16850,
"type"=>"TechNote",
"mobileListText"=>"<b>t. trust: </b> All Systems are OK",
"noteColor"=>"clear",
"noteClass"=>"bubble right"}},
{"id"=>10422,
"type"=>"Ticket",
"lastUpdated"=>"2014-11-04T10:54:07Z",
"shortSubject"=>"FOO STATUS UPDATE",
"shortDetail"=>"Reply to this message if all systems are functional..",
"displayClient"=>"No Client",
"updateFlagType"=>0,
"prettyLastUpdated"=>"58 minutes ago",
"latestNote"=>nil},
{"id"=>10421,
"type"=>"Ticket",
"lastUpdated"=>"2014-11-04T10:53:17Z",
"shortSubject"=>"FOO STATUS UPDATE",
"shortDetail"=>"Reply to this message if all systems are functional..",
"displayClient"=>"No Client",
"updateFlagType"=>0,
"prettyLastUpdated"=>"59 minutes ago",
"latestNote"=>nil}]
In the data above you can see that each ticket has an id, lastUpdated, shortSubject, shortDetail and latestNote. The value of latestNote will be nil if no one has replied to the ticket, but if someone does reply then mobileListText will have content.
So what I need to do is: once I get this data, the script looks for a subject matching "FOO STATUS UPDATE"; if that matches, it checks whether shortDetail matches "Reply to this message if all systems are functional.."; and if that matches too, it looks at latestNote. If latestNote is nil, it creates a log file specifying the date and time of the run, the id of the ticket in this state, and a message saying the ticket has not been replied to. But if the latest note has the value "mobileListText"=>"t. trust: All Systems are OK", then it creates the following data structure:
{"LastUpdate":1415130257,"Service":[{"time":"11-04-2014 10:58:47 GMT","region:":"","id":"","description":"All Systems are OK","service":""},{"time":"11-04-2014 10:54:07 GMT","region:":"","id":"","description":"All Systems are OK","service":""},{"time":"11-04-2014 10:53:17 GMT","region:":"","id":"","description":"All Systems are OK","service":""}]}
I'm able to do part of this. However, based on the data above, only one ticket has "All Systems are OK", meaning that only one of the tickets has been replied to, so it should only write something like this:
{"LastUpdate":1415130257,"Service":[{"time":"11-04-2014 10:58:47 GMT","region:":"","id":"","description":"All Systems are OK","service":""}]}
But instead it repeats the only ticket that has been replied to several times.
This is my code so far:
require 'rubygems'
require 'json'
require 'net/http'
require 'highline/import'
require 'pp'
require 'logger'
@usersol = 'foo'
@passol = 'foo123'
@limit = '25' # must be set before it is interpolated into @urlsol below
@urlsol = "http://dev-webhelpdesk.foo.corp:8081/helpdesk/WebObjects/Helpdesk.woa/ra/Tickets?list=group&page=1&limit=#{@limit}&username=#{@usersol}&password=#{@passol}"
@log = Logger.new('message_solar.log')
def ticket_data #looks for ticket data in solarwinds
resp = Net::HTTP.get_response(URI.parse(@urlsol))
url_output = resp.body
JSON.parse(url_output)
end
# CRONJOB THAT STARTS IT ALL:
#echo "Reply to this message if all systems are functional.." | mail -r noc@foo.com -s "FOO STATUS UPDATE:" noc-team@FOO.com >> /dev/null
# Looking for all the tickets with the following content
# ticket id, ticket subject and content
def search_allok(allok)
description = []
allok.each do |systems|
output1 = systems.has_key?'id'
if output1
systems.values_at('shortSubject').each do | subject |
output2 = subject.match(%r(TRUST STATUS UPDATE))
if output2
latestnote = systems.values_at('latestNote')
latestnote.each do |content|
if content
final = content.values_at('mobileListText')
final_ok = final[0].sub!(/^\<b\>.*\<\/b\>\s/, "")
systems_ok = final_ok.match(%r(All Systems are OK))
if systems_ok
ids = systems['id']
notify = {"LastUpdate" => Time.now.to_i, "Service" => []}
allok.each do |lastup|
reference = lastup.has_key? 'id'
if reference
timeid = lastup.values_at('lastUpdated')
timeid.each do |lines|
final=lines.split(/[-, T, Z]/)
notify["Service"] << { "time" => "#{final[1]}-#{final[2]}-#{final[0]} #{final[3]} GMT", "region:" => "", "id" => "#{ids}", "description" => "#{systems_ok}" , "service" => ''}
end
end
end
File.open("notify.json", "w") do |fileformatted|
fileformatted.puts (JSON.dump(notify))
end
else
time = Time.now
@log.info("#{time} - Ticket ID #{systems['id']} has not been updated")
end
else
@log.info("#{time} - Ticket ID #{systems['id']} has not been replied to")
end
end
end
end
end
end
end
# If the content is there then it needs to create
# the data structure including the LastUpdate
# (the time when the script ran) and the lastUpdated of each ticket,
# plus the description "All Systems are OK".
# I added the method below to the one above; I was thinking of doing it
# separately, but I encountered issues passing the information needed from above to below.
def datastructure(format_file) #creates JSON file lastupdated of each ticket in the queue
notify = {"LastUpdate" => Time.now.to_i, "Service" => []}
format_file.each do |lastup|
reference = lastup.has_key? 'id'
if reference
timeid = lastup.values_at('lastUpdated')
timeid.each do |lines|
final=lines.split(/[-, T, Z]/)
notify["Service"] << { "time" => "#{final[1]}-#{final[2]}-#{final[0]} #{final[3]} GMT", "region:" => "", "id" => "", "description" => region , "service" => ''}
end
end
end
File.open("notify.json", "w") do |fileformatted|
fileformatted.puts (JSON.dump(notify))
end
end
#ticket_data
#datastructure(ticket_data)
search_allok(ticket_data)
The code you've written is basically a roundabout version of what Ruby's map and select methods achieve. The duplication you're seeing comes from the inner allok.each loop: whenever a matching note is found, you push one Service entry for every ticket in the list, not just the matching one. See this article: Ruby Explained: Map, Select, and Other Enumerable Methods
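A rough sketch of that select/map approach, written against the ticket hashes shown in the question (build_notify is a hypothetical helper name, and the filters and file name just mirror your code, so treat it as illustrative rather than drop-in):

require 'json'

def build_notify(tickets)
  # select keeps only tickets whose latest note reports all systems OK
  replied = tickets.select do |ticket|
    note = ticket['latestNote']
    ticket['shortSubject'] == 'FOO STATUS UPDATE' &&
      ticket['shortDetail'] == 'Reply to this message if all systems are functional..' &&
      note && note['mobileListText'].to_s.include?('All Systems are OK')
  end
  # map turns each replied ticket into exactly one Service entry
  services = replied.map do |ticket|
    date, time = ticket['lastUpdated'].split('T') # e.g. "2014-11-04T10:58:47Z"
    year, month, day = date.split('-')
    { 'time' => "#{month}-#{day}-#{year} #{time.delete('Z')} GMT",
      'region:' => '', 'id' => ticket['id'].to_s,
      'description' => 'All Systems are OK', 'service' => '' }
  end
  { 'LastUpdate' => Time.now.to_i, 'Service' => services }
end

File.write('notify.json', JSON.dump(build_notify(ticket_data)))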

Ajax and Ruby script only returning 1 record instead of many

I am doing an Ajax call using Ruby and Sinatra. The query should return multiple rows, but it only returns one.
The ajax script is:
$(document).ready(function() {
$(".showmembers").click(function(e) {
e.preventDefault();
alert('script');
var short_id = $('#shortmembers').val();
console.log(short_id);
$.getJSON(
"/show",
{ 'id' : short_id },
function(res, status) {
console.log(res);
$('#result').html('');
$('#result').append('<input type=checkbox value=' + res["email"] + '>');
$('#result').append( res["first"] );
$('#result').append( res["last"] );
$('#result').append( res["email"] );
});
});
});
and the Ruby script is:
get '/show' do
id = params['id']
DB["select shortname, first, last, email from shortlists sh JOIN shortmembers sm ON sm.short_id = sh.list_id JOIN candidates ca ON ca.id = sm.candidate_id where sh.list_id = ?", id].each do |row|
@shortname = row[:shortname]
@first = row[:first]
@last = row[:last]
@email = row[:email]
puts @shortname
puts @first
puts @last
puts @email
halt 200, { shortname: @shortname, first: @first, last: @last, email: @email }.to_json
end
end
If I run the query directly against Postgres in the terminal I get 9 rows back, but on my website, as above, it returns just the first row.
What's the problem? There is no error in the console, just one record.
You have halt 200 inside your loop, so Sinatra terminates request processing on the very first row and returns that single result back up the stack.
To return a full set of results, you will need to do something like the following:
get '/show' do
id = params['id']
results = DB["select shortname, first, last, email from shortlists sh
JOIN shortmembers sm ON sm.short_id = sh.list_id
JOIN candidates ca ON ca.id = sm.candidate_id
where sh.list_id = ?", id].map do |row|
{
:short_name => row[:shortname],
:first=>row[:first],
:last=>row[:last],
:email=>row[:email]
}
end
halt 200, results.to_json
end
This will return the selected fields from each row as an array of hashes.
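With that change the response body is a JSON array of row hashes, along these lines (the values are invented for illustration):

[{"short_name":"Dev list","first":"Kim","last":"Davis","email":"kim@example.com"},
 {"short_name":"Dev list","first":"Joe","last":"Bloggs","email":"joe@example.com"}]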
In fact, as I look at the above, the solution might even be as simple as:
get '/show' do
id = params['id']
results = DB["select shortname, first, last, email from shortlists sh
JOIN shortmembers sm ON sm.short_id = sh.list_id
JOIN candidates ca ON ca.id = sm.candidate_id
where sh.list_id = ?", id]
halt 200, results.to_json
end
since you don't seem to be selecting anything but the columns you desire in the first place. Note that either way the endpoint now returns a JSON array of row objects, so the client-side success callback needs to iterate over res (for example with $.each) instead of reading fields like res["email"] off a single object.
