boto3 AWS EC2 status/instance check failure email script - amazon-ec2

Hi, I am trying to run a scheduled script (not via Lambda, but from a PC or an on-site scheduled job, maybe every 10 minutes or so) to check whether any of my EC2 instances are failing system/instance status checks. If a check fails, I want to send an email naming the failing instance (just the name), without rebooting it.
Here is what I have:
import boto3
import smtplib

region_name = 'us-west-1'

class bcolors:
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKCYAN = '\033[96m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'

client = boto3.client("ec2")
clientsns = boto3.client("sns")

# IncludeAllInstances=True also returns statuses for stopped instances
status = client.describe_instance_status(IncludeAllInstances=True)

for i in status["InstanceStatuses"]:
    print("AvailabilityZone :", i["AvailabilityZone"])
    print("InstanceId :", i["InstanceId"])
    print("InstanceState :", i["InstanceState"])
    print("InstanceStatus", i["InstanceStatus"])
    print("SystemStatus", i["SystemStatus"])
    in_status = i['InstanceStatus']['Details'][0]['Status']
    sys_status = i['SystemStatus']['Details'][0]['Status']
    msg1 = 'The following instances failed status checks: {}'.format(i["InstanceId"])
    msg2 = 'All Instances passed System/instance checks!'
    # check statuses
    if (in_status != 'passed') or (sys_status != 'passed'):
        print(bcolors.WARNING + 'Reboot required for ' + i["InstanceId"] + bcolors.ENDC)
        clientsns.publish(TopicArn='arn:aws:sns:us-west-1:462518063038:test', Message=msg1)
    else:
        print('All Instances passed System/instance checks!')
        clientsns.publish(TopicArn='arn:aws:sns:us-west-1:462518063038:test', Message=msg2)
The problem I have is that it sends one message per instance. I just want to send one email covering all instances. Any ideas?
Thank you
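One way to get a single email, sketched below under the assumption that the SNS topic shown in the question is what delivers the mail: collect the failing instance IDs in a list inside the loop, then publish one message after the loop finishes.

failed = []
for i in status["InstanceStatuses"]:
    in_status = i['InstanceStatus']['Details'][0]['Status']
    sys_status = i['SystemStatus']['Details'][0]['Status']
    if (in_status != 'passed') or (sys_status != 'passed'):
        failed.append(i["InstanceId"])

if failed:
    msg = 'The following instances failed status checks: ' + ', '.join(failed)
else:
    msg = 'All Instances passed System/instance checks!'
# one publish total, instead of one per instance
clientsns.publish(TopicArn='arn:aws:sns:us-west-1:462518063038:test', Message=msg)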

Related

Use pysmb to operate an SMB server as an anonymous user

This is the package I grabbed to talk to someone else's SMB server; the server can be logged into anonymously, but I can't manage to.
I found that the difference between the packet I send to the target and the one a working client sends is that mine contains a Unicode password field.
I have no idea how to remove that field. Below is my Python code.
from smb import smb_structs
from smb.SMBConnection import SMBConnection

# disable newer dialect features before connecting
smb_structs.SUPPORT_EXTENDED_SECURITY = False
smb_structs.SMB_FLAGS_CANONICALIZED_PATHS = False
smb_structs.SMB_FLAGS2_UNICODE = False
smb_structs.SMB_FLAGS2_IS_LONG_NAME = False
smb_structs.SMB_FLAGS2_NT_STATUS = False
smb_structs.CAP_UNICODE = False
smb_structs.CAP_LARGE_FILES = False
smb_structs.CAP_STATUS32 = False

def transfer():
    smb = SMBConnection(username, password, "", "IIE-BCHIOVTGH68<20>",
                        sign_options=0, use_ntlm_v2=False)
    try:
        smb.connect(ip)
        folders = smb.listShares()
        for i in folders:
            print(i.name)
    except Exception as e:
        print(e)
    finally:
        smb.close()  # close once, whether or not the connection succeeded

if __name__ == '__main__':
    ip, port, username, password = '192.168.20.102', 139, "", ""
    transfer()

Asynchronous server hanging in Python

I am currently using a Python asynchronous server to serve some clients. The server works well at first, then eventually hangs without showing whether it is receiving requests from clients. When I press Ctrl-C, it shows that it is receiving the requests from the clients, and it does not stop; it just keeps receiving requests.
I don't know where this bug is coming from.
Thank you in advance.
import asyncio, json
import struct
import Engine

header_struct = struct.Struct("!Q")  # messages up to 2**64 - 1 in length

async def recvall(reader, length):
    blocks = []
    while length:
        block = await reader.read(length)
        if not block:
            raise EOFError('socket closed with {} bytes left in this block'.format(length))
        length -= len(block)
        blocks.append(block)
    return b''.join(blocks)

async def get_block(reader):
    data = await recvall(reader, header_struct.size)
    (block_length,) = header_struct.unpack(data)
    return await recvall(reader, block_length)

async def put_block(writer, message):
    block_length = len(message)
    writer.write(header_struct.pack(block_length))
    writer.write(message)
    # await writer.drain()  # note: while this stays commented out, the write buffer can grow without bound

async def handle_conversation(reader, writer):
    address__ = writer.get_extra_info("peername")
    print("Accepted connection from {}".format(address__))
    while True:
        # check whether there is data to receive
        block = await get_block(reader)
        # decode the data
        data = block.decode()
        decoded_data = json.loads(data)
        # don't forget to make this synchronous
        answer = await Engine.get_answer(decoded_data["Task"], decoded_data["content"])
        # don't forget to check that there is necessary data to push and that it is conveyed
        await put_block(writer, answer)
        print(answer)

if __name__ == '__main__':
    address = Engine.parse_command_line("asyncio server using coroutine")

    async def main():
        server = await asyncio.start_server(handle_conversation, *address)
        addr = server.sockets[0].getsockname()
        print(f'Serving on {addr}')
        async with server:
            await server.serve_forever()

    asyncio.run(main(), debug=True)
Engine Code
import argparse
import json
import time
import upload_pic_and_search
import accept_connections
import connect
import opinion_poll
import share
import zmq
from jsonrpclib import Server

context = zmq.Context()

aphorisms = {"share": share.share_,
             "poll": opinion_poll.add_poll,
             "add_profile_pic": upload_pic_and_search.profile_pic,
             "connect": connect.connect,
             "accept_connection": accept_connections.accept_connection}

def sighn_up(doc):
    """Connect to the sign-up service."""
    proxy = Server('http://localhost:7002')
    answer = proxy.sighn_up(doc)
    return answer

def Verification(doc):
    """Code verification routine."""
    proxy = Server('http://localhost:7002')
    answer = proxy.verify(doc)
    return answer

def login(doc):
    """Handle authentication."""
    proxy = Server('http://localhost:7002')
    answer = proxy.autheticate(doc)
    return answer

def post(doc):
    """Connect to the server that handles posts."""
    proxy = Server('http://localhost:6700')
    answer = proxy.post(doc)
    return answer

def comment(doc):
    """Connect to the server that stores comments."""
    proxy = Server('http://localhost:6701')
    answer = proxy.comments_(doc)
    return answer

def reply(doc):
    """Store the reply."""
    proxy = Server('http://localhost:6702')
    answer = proxy.reply(doc)
    return answer

def share(doc):
    """Share the post."""
    proxy = Server('http://localhost:6703')
    answer = proxy.share(doc)
    return answer

def likes(doc):
    """Connect to the likes queue."""
    zcontext = zmq.Context()
    osock = zcontext.socket(zmq.PUSH)
    osock.connect("tcp://127.0.0.1:6704")
    osock.send_json(doc)
    return {"Task": "like", "like": True}

def follow(doc):
    """Handle the follow routine."""
    zcontext = zmq.Context()
    osock = zcontext.socket(zmq.PUSH)
    osock.connect("tcp://127.0.0.1:6705")
    osock.send_json(doc)

def connect(doc):
    """Connect to the routine for connections."""
    zcontext = zmq.Context()
    osock = zcontext.socket(zmq.PUSH)
    osock.connect("tcp://127.0.0.1:6706")
    osock.send_json(doc)

def accept_connection(doc):
    """The queue responsible for accepting connections."""
    zcontext = zmq.Context()
    osock = zcontext.socket(zmq.PUSH)
    osock.connect("tcp://127.0.0.1:6707")
    osock.send_json(doc)

def add_profile_pic(doc):
    """Add the profile pic of the user."""
    proxy = Server('http://localhost:7006')
    answer = proxy.profile_pic(doc)
    return answer

def search(doc):
    """Search for the user in the database."""
    proxy = Server('http://localhost:7006')
    answer = proxy.search(doc)
    return answer

def profile(doc):
    """Fetch the user's profile from the database."""
    proxy = Server('http://localhost:7006')
    answer = proxy.profile(doc)
    return answer

async def get_answer(aphorism, content):
    """Return the response to a particular question."""
    # fetch the responsible function; eval() works, but the dict lookup
    # commented below is the safer alternative:
    # function = aphorisms.get(aphorism, "Error:Unknown aphorism.")
    function = eval(aphorism)
    answer = function(content)
    return send(answer)

def send(data):
    """Prepare the data to be sent via the socket."""
    json_data = json.dumps(data)
    data_bytes = json_data.encode()
    return data_bytes

def parse_command_line(description):
    """Parse the command line and return a socket address."""
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('host', help="IP or hostname")
    parser.add_argument("-p", metavar='port', type=int, default=1060, help="TCP port (default 1060)")
    args = parser.parse_args()
    address = (args.host, args.p)
    return address

def recv_untill(sock, suffix):
    """Receive bytes over socket `sock` until we receive the `suffix`."""
    message = sock.recv(4096)
    if not message:
        raise EOFError("Socket closed")
    while not message.endswith(suffix):
        data = sock.recv(4096)
        if not data:
            raise IOError('received {!r} then socket closed'.format(message))
        message += data
    return message
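A hedged aside on the hang: every Engine helper makes a blocking jsonrpclib or zmq call, so awaiting get_answer stalls the whole event loop while the RPC waits, and no other client can be served in the meantime. One common way around this, sketched below with names reused from the Engine module, is to push the blocking call onto a thread pool:

import asyncio

async def get_answer(aphorism, content):
    """Non-blocking variant: run the synchronous RPC in a worker thread."""
    loop = asyncio.get_running_loop()
    function = aphorisms.get(aphorism)  # dict lookup instead of eval()
    if function is None:
        return send({"Error": "Unknown aphorism."})
    # run_in_executor keeps the event loop free while the RPC blocks
    answer = await loop.run_in_executor(None, function, content)
    return send(answer)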

Loading test data in batches with TensorFlow

The following code is my pipeline for reading images and labels from files:
import tensorflow as tf
import numpy as np
import tflearn.data_utils
from tensorflow.python.framework import ops
from tensorflow.python.framework import dtypes
import sys

# process labels in the input file
def process_label(label):
    info = np.zeros(6)
    ...
    return info

def read_label_file(file):
    f = open(file, "r")
    filepaths = []
    labels = []
    lines = []
    for line in f:
        tokens = line.split(",")
        filepaths.append([tokens[0], tokens[1], tokens[2]])
        labels.append(process_label(tokens[3:]))
        lines.append(line)
    return filepaths, np.vstack(labels), lines

def get_data_batches(params):
    # read labels and file paths
    train_filepaths, train_labels, train_line = read_label_file(params.train_info)
    test_filepaths, test_labels, test_line = read_label_file(params.test_info)
    # convert strings into tensors
    train_images = ops.convert_to_tensor(train_filepaths)
    train_labels = ops.convert_to_tensor(train_labels)
    train_line = ops.convert_to_tensor(train_line)
    test_images = ops.convert_to_tensor(test_filepaths)
    test_labels = ops.convert_to_tensor(test_labels)
    test_line = ops.convert_to_tensor(test_line)
    # create input queues
    train_input_queue = tf.train.slice_input_producer([train_images, train_labels, train_line], shuffle=params.shuffle)
    test_input_queue = tf.train.slice_input_producer([test_images, test_labels, test_line], shuffle=False)
    # process path and string tensors into an image and a label
    train_image = None
    for i in range(train_input_queue[0].get_shape()[0]):
        file_content = tf.read_file(params.path_prefix + train_input_queue[0][i])
        train_imageT = (tf.to_float(tf.image.decode_jpeg(file_content, channels=params.num_channels))) * (1.0 / 255)
        train_imageT = tf.image.resize_images(train_imageT, [params.load_size[0], params.load_size[1]])
        train_imageT = tf.random_crop(train_imageT, size=[params.crop_size[0], params.crop_size[1], params.num_channels])
        train_imageT = tf.image.random_flip_up_down(train_imageT)
        train_imageT = tf.image.per_image_standardization(train_imageT)
        if i == 0:
            train_image = train_imageT
        else:
            train_image = tf.concat([train_image, train_imageT], 2)
    train_label = train_input_queue[1]
    train_lineInfo = train_input_queue[2]
    test_image = None
    for i in range(test_input_queue[0].get_shape()[0]):
        file_content = tf.read_file(params.path_prefix + test_input_queue[0][i])
        test_imageT = tf.to_float(tf.image.decode_jpeg(file_content, channels=params.num_channels)) * (1.0 / 255)
        test_imageT = tf.image.resize_images(test_imageT, [params.load_size[0], params.load_size[1]])
        test_imageT = tf.image.central_crop(test_imageT, (params.crop_size[0] + 0.0) / params.load_size[0])
        test_imageT = tf.image.per_image_standardization(test_imageT)
        if i == 0:
            test_image = test_imageT
        else:
            test_image = tf.concat([test_image, test_imageT], 2)
    test_label = test_input_queue[1]
    test_lineInfo = test_input_queue[2]
    # define tensor shapes
    train_image.set_shape([params.crop_size[0], params.crop_size[1], params.num_channels * 3])
    train_label.set_shape([66])
    test_image.set_shape([params.crop_size[0], params.crop_size[1], params.num_channels * 3])
    test_label.set_shape([66])
    # collect batches of images before processing
    train_image_batch, train_label_batch, train_lineno = tf.train.batch([train_image, train_label, train_lineInfo], batch_size=params.batch_size, num_threads=params.num_threads, allow_smaller_final_batch=True)
    test_image_batch, test_label_batch, test_lineno = tf.train.batch([test_image, test_label, test_lineInfo], batch_size=params.test_size, num_threads=params.num_threads, allow_smaller_final_batch=True)
    if params.loadSlice == 'all':
        return train_image_batch, train_label_batch, train_lineno, test_image_batch, test_label_batch, test_lineno
    elif params.loadSlice == 'train':
        return train_image_batch, train_label_batch
    elif params.loadSlice == 'test':
        return test_image_batch, test_label_batch
    elif params.loadSlice == 'train_info':
        return train_image_batch, train_label_batch, train_lineno
    elif params.loadSlice == 'test_info':
        return test_image_batch, test_label_batch, test_lineno
    else:
        return train_image_batch, train_label_batch, test_image_batch, test_label_batch
I want to use the same pipeline for loading the test data. My test set is huge and I cannot load all of it at once.
I have 20453 test examples, which is not an integer multiple of the batch size (here 512).
How can I read all of my test examples through this pipeline exactly once and then measure the performance on them?
Currently I am using this code for batching my test data, and it does not work: it always reads a full batch from the queue, even when I set allow_smaller_final_batch to True.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver.restore(sess, "checkpoints2/snapshot-16")
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    more = True
    num_examples = 0
    while more:
        img_test, lbl_test, lbl_line = sess.run([test_image_batch, test_label_batch, test_lineno])
        print(lbl_test.shape)
        size = lbl_test.shape[0]
        num_examples += size
        if size < args.batch_size:
            more = False
# the with block closes the session on exit
This is the code of my model:
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.normalization import batch_normalization
from tflearn.layers.estimator import regression
from tflearn.activations import relu

def get_alexnet(x, num_output):
    network = conv_2d(x, 64, 11, strides=4)
    network = batch_normalization(network, epsilon=0.001)
    network = relu(network)
    network = max_pool_2d(network, 3, strides=2)
    network = conv_2d(network, 192, 5)
    network = batch_normalization(network, epsilon=0.001)
    network = relu(network)
    network = max_pool_2d(network, 3, strides=2)
    network = conv_2d(network, 384, 3)
    network = batch_normalization(network, epsilon=0.0001)
    network = relu(network)
    network = conv_2d(network, 256, 3)
    network = batch_normalization(network, epsilon=0.001)
    network = relu(network)
    network = conv_2d(network, 256, 3)
    network = batch_normalization(network, epsilon=0.001)
    network = relu(network)
    network = max_pool_2d(network, 3, strides=2)
    network = fully_connected(network, 4096)
    network = batch_normalization(network, epsilon=0.001)
    network = relu(network)
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096)
    network = batch_normalization(network, epsilon=0.001)
    network = relu(network)
    network = dropout(network, 0.5)
    network1 = fully_connected(network, num_output)
    network2 = fully_connected(network, 12)
    network3 = fully_connected(network, 6)
    return network1, network2, network3
This can be achieved by setting num_epochs=1 on the input producer together with allow_smaller_final_batch=True on the batch op; see the sketch below.
Another option is to set batch_size to the size of the test set.
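A minimal sketch of the first suggestion, assuming the queue-runner pipeline above (variable names reused from the question): create the test queue with num_epochs=1 so it raises OutOfRangeError after one pass, then drain batches until that happens.

# in get_data_batches, limit the producer to a single epoch:
# test_input_queue = tf.train.slice_input_producer(
#     [test_images, test_labels, test_line], shuffle=False, num_epochs=1)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())  # num_epochs keeps a local counter
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    num_examples = 0
    try:
        while not coord.should_stop():
            lbl_test = sess.run(test_label_batch)
            num_examples += lbl_test.shape[0]  # the final batch may be smaller
    except tf.errors.OutOfRangeError:
        pass  # queue exhausted: every example has been seen exactly once
    finally:
        coord.request_stop()
        coord.join(threads)
    print(num_examples)  # should equal 20453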

Running into rate limit for Boto3 EC2 create_snapshots

I ran the code at the bottom of this post in my environment and got the following error after a few successes:
An error occurred (SnapshotCreationPerVolumeRateExceeded) when calling the CreateSnapshot operation: The maximum per volume CreateSnapshot request rate has been exceeded. Use an increasing or variable sleep interval between requests.
I'm used to doing something like this to paginate my results using a MaxResults variable and the NextToken returned by the response:
maxResults = 100
result = ec2.describe_instances(MaxResults=maxResults)
nextToken = result['NextToken']
instance_ids = []
for reservation in result['Reservations']:
    for instance in reservation['Instances']:
        instance_ids.append(instance['InstanceId'])
size = len(instance_ids)
while size == maxResults:
    result = ec2.describe_instances(MaxResults=maxResults, NextToken=nextToken)
    nextToken = result['NextToken']
    size = len(instance_ids)
    # etc...
However, because I'm already filtering by tag in my describe_instances call, I'm not allowed to pass a MaxResults parameter as well. Additionally, create_snapshot's call signature only lets me specify a dry run, the volume ID, and a description of the snapshot, and it does not return a NextToken or similar. How can I avoid this error? Must I introduce a sleep, as the error message suggests?
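(As an aside on the pagination pattern, a minimal sketch assuming the same tag filter as the Lambda below: boto3 paginators accept Filters and follow NextToken internally, which replaces the manual loop above.)

import boto3

ec2 = boto3.client('ec2')
paginator = ec2.get_paginator('describe_instances')
instance_ids = []
# the paginator follows NextToken for us, page by page
for page in paginator.paginate(Filters=[{'Name': 'tag:GL-sub-purpose', 'Values': ['Schedule']}]):
    for reservation in page['Reservations']:
        for instance in reservation['Instances']:
            instance_ids.append(instance['InstanceId'])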
Lambda function code:
from __future__ import print_function
import boto3
import datetime
import time

ec2 = boto3.client('ec2')

def createScheduleSnapshots(event, context):
    errors = []
    try:
        print("Creating snapshots on " + str(datetime.datetime.today()) + ".")
        schedulers = ec2.describe_instances(Filters=[{'Name': 'tag:GL-sub-purpose', 'Values': ['Schedule']}])
        schedule_instances = []
        for reservation in schedulers['Reservations']:
            for instance in reservation['Instances']:
                schedule_instances.append(instance)
        print("Performing backup on " + str(len(schedule_instances)) + " schedules.")
        successful = []
        failed = []
        for s in schedule_instances:
            try:
                instanceId = s['InstanceId']
                blockDeviceMappings = s['BlockDeviceMappings']
                snapshotDescription = instanceId + "-" + str(datetime.date.today().strftime('%Y-%m-%d')) + "-46130e7ac954-automated"
                for bd_maps in blockDeviceMappings:
                    if bd_maps['DeviceName'] == '/dev/sdf':  # don't back up the OS volume
                        volumeId = bd_maps['Ebs']['VolumeId']
                        print("\tSnapshotting " + instanceId)
                        ec2.create_snapshot(
                            VolumeId=volumeId,
                            Description=snapshotDescription
                        )
                successful.append(instanceId)
            except Exception as e:
                print(e)
                errors.append(e)
                failed.append(instanceId + " :\t" + str(e))
        print("Performed backup on " + str(len(successful)) + " schedulers. Failed backup on " + str(len(failed)) + " schedulers.")
    except Exception as e:
        print(e)
        errors.append(e)
    if len(errors) == 0:
        return "Success"
    else:
        raise Exception("Errors during invocation of Lambda. " + str(errors))
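One way to handle SnapshotCreationPerVolumeRateExceeded, sketched below as an assumption-laden example rather than the definitive fix: retry the create_snapshot call with an increasing sleep, as the error message itself suggests. The retry count and base delay are illustrative choices.

import time
import botocore.exceptions

def create_snapshot_with_backoff(ec2, volume_id, description, max_retries=5):
    """Retry create_snapshot with exponential backoff on the per-volume rate limit."""
    for attempt in range(max_retries):
        try:
            return ec2.create_snapshot(VolumeId=volume_id, Description=description)
        except botocore.exceptions.ClientError as e:
            if e.response['Error']['Code'] != 'SnapshotCreationPerVolumeRateExceeded':
                raise  # unrelated error: surface it immediately
            time.sleep(2 ** attempt)  # 1s, 2s, 4s, ...
    raise RuntimeError("create_snapshot still rate-limited after %d retries" % max_retries)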

Python 3 Tkinter multiple functions running at once

I have a chat window for the client portion of a chat application that uses Tkinter for the GUI:
import socket
import select
import time
from threading import Thread
from multiprocessing import Process
import sys
from tkinter import *

HOST = "localhost"
PORT = 5678

client_socket = socket.socket()
client_socket.settimeout(2)
try:
    client_socket.connect((HOST, PORT))
except:
    print("Connection failed.")
    sys.exit()
print("Connected to [" + str(HOST) + "," + str(PORT) + "] successfully")

class ChatWindow:
    def __init__(self):
        form = Tk()
        form.minsize(200, 200)
        form.resizable(0, 0)
        form.title("Chat")
        box = Entry(form)
        form.bind("<Return>", lambda x: self.sendmessage(self.textbox.get()))
        area = Text(form, width=20, height=10)
        area.config(state=DISABLED)
        area.grid(row=0, column=1, padx=5, pady=5, sticky=W)
        box.grid(row=1, column=1, padx=5, pady=5, sticky=W)
        self.textbox = box
        self.textarea = area
        self.form = form
        p1 = Process(target=self.updating)
        p1.start()
        p2 = Process(target=self.tryrecvmessage)
        p2.start()

    def addchat(self, msg, clear=False):
        self.textarea.config(state=NORMAL)
        self.textarea.insert(END, msg + "\n")
        if clear:
            # option to clear text in box on adding
            self.textbox.delete(0, END)
        self.textarea.see(END)
        self.textarea.config(state=DISABLED)

    def sendmessage(self, msg):
        data = str.encode(msg)
        client_socket.send(data)
        self.addchat("<You> " + msg, True)

    def updating(self):
        while True:
            self.form.update()
            self.form.update_idletasks()
            time.sleep(0.01)

    def tryrecvmessage(self):
        while True:
            read_sockets, write_sockets, error_sockets = select.select([client_socket], [], [])
            for sock in read_sockets:
                data = sock.recv(4096)
                if data:
                    self.addchat(data.decode())
                else:
                    self.addchat("Disconnected...")
                    sys.exit()

if __name__ == "__main__":
    window = ChatWindow()
I want the updating() function and the tryrecvmessage() function to run simultaneously, so that the GUI keeps updating while the client still receives messages from the server. I've tried using the threading module as well, but I need to create the threads below where the other functions are defined, yet those functions need to be defined below __init__(). What do I do?
You can attach the functions to the Tk event loop using the after method. Essentially the syntax for after goes like this:
after(ms, command=[function object])
It schedules the function object passed as command to run on the Tk event loop once ms milliseconds have passed; to repeat the function, have it re-register itself with another after call.
One caveat here: you would want to remove the while True from the functions, as the after scheduling repeats them anyway.
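A minimal sketch of that pattern applied to the question's two methods; it assumes the Tk root is stored as self.form in __init__ (the original kept it in a local variable), and it polls the socket with a zero timeout so select never blocks the GUI.

def updating(self):
    self.form.update_idletasks()  # no while True: after() re-runs the method
    self.form.after(10, self.updating)  # re-schedule in ~10 ms

def tryrecvmessage(self):
    # a timeout of 0 makes this a non-blocking poll, so the GUI stays responsive
    read_sockets, _, _ = select.select([client_socket], [], [], 0)
    for sock in read_sockets:
        data = sock.recv(4096)
        if data:
            self.addchat(data.decode())
        else:
            self.addchat("Disconnected...")
            return  # stop re-scheduling once the server is gone
    self.form.after(50, self.tryrecvmessage)

Kick both off once at the end of __init__ with self.form.after(10, self.updating) and self.form.after(50, self.tryrecvmessage), then call self.form.mainloop(); no Process or Thread objects are needed.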
