Lambda@Edge - The specified key does not exist - aws-lambda

I am using Lambda@Edge to handle image compression with Sharp. The code works as-is, but when I tried to add a new function that parses a query parameter so the user can define the compression quality, Lambda/CloudFront started returning an error that the key does not exist, even though it does.
The path that was used as an example is:
/compress/480/uploads/1000491600869812260.jpg?quality=30
Error that shows up in the browser:
<Error>
<Code>NoSuchKey</Code>
<Message>The specified key does not exist.</Message>
<Key>compress/480/uploads/1000491600869812260.jpg</Key>
<RequestId>5KPMBD6RNETZCA3Z</RequestId>
<HostId>
brMd/eCi6uv9s3VIl4IRHg7FlIytNA8DkgPjGfGrej4SkUsMxuEm1YHGEEll5rydO24gecIOTtE=
</HostId>
</Error>
Error log from CloudFront:
#Version: 1.0
#Fields: date time x-edge-location sc-bytes c-ip cs-method cs(Host) cs-uri-stem sc-status cs(Referer) cs(User-Agent) cs-uri-query cs(Cookie) x-edge-result-type x-edge-request-id x-host-header cs-protocol cs-bytes time-taken x-forwarded-for ssl-protocol ssl-cipher x-edge-response-result-type cs-protocol-version fle-status fle-encrypted-fields c-port time-to-first-byte x-edge-detailed-result-type sc-content-type sc-content-len sc-range-start sc-range-end
2021-06-09 06:06:43 ORD52-C3 689 182.253.36.23 GET d32xc09eirob59.cloudfront.net /compress/480/uploads/1000491600869812260.jpg 404 - Mozilla/5.0%20(Macintosh;%20Intel%20Mac%20OS%20X%2010_15_7)%20AppleWebKit/605.1.15%20(KHTML,%20like%20Gecko)%20Version/14.1.1%20Safari/605.1.15 quality=10 - Error FPFQE5Z-XuBeAK61KaJbNqDAbypyo3BhrH7xom7GZik--UgESIVQFw== d32xc09eirob59.cloudfront.net http 426 3.726 - - - Error HTTP/1.1 - - 54708 3.726 Error application/xml - - -
In the code below, if I comment out the lines that call the function that parses the quality from the query parameter (marked as "the problematic line" in the code), the code works again. But from my point of view there is nothing wrong with that function, since it is just a simple regex that extracts a value.
Is there any limitation or constraint in AWS Lambda that makes it behave like this? Is there anything I can do to make it work?
P.S. I already tried using the URL and querystring libraries to parse the path, but they always throw a LambdaException, which is why I am trying to parse it manually with a regex.
Problematic line/function:
const getQuality = (path) => {
  const match = path.match(/quality=(\d+)/)
  const quality = parseInt(match[1], 10)
  return quality
}
const quality = getQuality(path)
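A likely culprit, assuming the standard CloudFront event shape: request.uri contains only the path, and the query string arrives separately in request.querystring. In that case the string passed to getQuality never contains quality=, match is null, and match[1] throws a TypeError before the response can be rewritten. A minimal null-safe sketch along those lines (the querystring parameter and fallback default are illustrative, not from the original code):
const getQuality = (querystring, fallback = QUALITY) => {
  // querystring is e.g. 'quality=30'; fall back when the parameter is absent
  const match = (querystring || '').match(/quality=(\d+)/)
  return match ? parseInt(match[1], 10) : fallback
}
// in the handler: const quality = getQuality(request.querystring)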
Full code:
'use strict'
const AWS = require('aws-sdk')
const S3 = new AWS.S3({ signatureVersion: 'v4' })
const Sharp = require('sharp')
const BUCKET = 'some-bucket'
const QUALITY = 70
// Image types that can be handled by Sharp
const SUPPORTED_IMAGE_TYPES = ['jpg', 'jpeg', 'png', 'gif', 'webp', 'svg', 'tiff']
const JSON_CONTENT_HEADER = [{ key: 'Content-Type', value: 'application/json' }]
const WEBP_CONTENT_HEADER = [{ key: 'Content-Type', value: 'image/webp' }]
const getOriginalKey = (path) => {
  const match = path.match(/\/(\d+)\/([A-Za-z0-9_\-]+)\/([A-Za-z0-9_\-]+)\.(\w+)\??/)
  const imageWidth = parseInt(match[1], 10)
  const prefix = match[2]
  const imageName = match[3]
  const imageFormat = match[4]
  const originalKey = `${prefix}/${imageName}.${imageFormat}`
  return { originalKey, imageWidth, imageFormat }
}
const getQuality = (path) => {
  const match = path.match(/quality=(\d+)/)
  const quality = parseInt(match[1], 10)
  return quality
}
const responseUpdate = (
  response,
  status,
  statusDescription,
  body,
  contentHeader,
  bodyEncoding = undefined
) => {
  response.status = status
  response.statusDescription = statusDescription
  response.body = body
  response.headers['content-type'] = contentHeader
  if (bodyEncoding) {
    response.bodyEncoding = bodyEncoding
  }
  return response
}
exports.handler = async (event, context, callback) => {
  let { request, response } = event.Records[0].cf
  const { uri } = request
  const headers = response.headers
  console.log(JSON.stringify({ status_code: response.status, uri }))
  // NOTE: Check whether the image is present or not
  if (response.status == 404) {
    const splittedUri = uri.split('compress')
    if (splittedUri.length != 2) {
      callback(null, response)
      return
    }
    // NOTE: Parse the prefix, image name, imageWidth and format
    const path = splittedUri[1] // Read the required path (/480/uploads/123.jpg)
    const { originalKey, imageWidth, imageFormat } = getOriginalKey(path)
    if (!SUPPORTED_IMAGE_TYPES.some((type) => type == imageFormat.toLowerCase())) {
      response = responseUpdate(
        response,
        403,
        'Forbidden',
        'Unsupported image type',
        JSON_CONTENT_HEADER
      )
      callback(null, response)
      return
    }
    try {
      // NOTE: Get original image from S3
      const s3Object = await S3.getObject({ Bucket: BUCKET, Key: originalKey }).promise()
      if (s3Object.ContentLength == 0) {
        response = responseUpdate(
          response,
          404,
          'Not Found',
          'The image does not exist',
          JSON_CONTENT_HEADER
        )
        callback(null, response)
        return
      }
      // NOTE: Optimize the image
      let sharpObject = await Sharp(s3Object.Body)
      const metaData = await sharpObject.metadata()
      if (imageWidth < metaData.width) {
        sharpObject = await sharpObject.resize(imageWidth)
      }
      // >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
      // NOTE: The problematic line
      const quality = getQuality(path)
      // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
      // NOTE: the parsed `quality` is never used; the constant QUALITY is what
      // is actually passed to webp() below
      const compressedImageBuffer = await sharpObject.webp({ quality: QUALITY }).toBuffer()
      const byteLength = Buffer.byteLength(compressedImageBuffer, 'base64')
      if (byteLength == metaData.size) {
        callback(null, response)
        return
      }
      // NOTE: Lambda@Edge caps generated response bodies at roughly 1 MB, hence this guard
      if (byteLength >= 1046528) {
        response = responseUpdate(
          response,
          400,
          'Invalid size',
          'The size of compressed image is too big',
          JSON_CONTENT_HEADER
        )
        callback(null, response)
        return
      }
      // NOTE: Generate a binary response with an optimized image
      response = responseUpdate(
        response,
        200,
        'OK',
        compressedImageBuffer.toString('base64'),
        WEBP_CONTENT_HEADER,
        'base64'
      )
      response.headers['cache-control'] = [{ key: 'cache-control', value: 'max-age=31536000' }]
    } catch (err) {
      console.error(err)
    }
  } else {
    headers['content-type'] = WEBP_CONTENT_HEADER
  }
  return response
}

Related

Visual regression in Cypress

I am doing visual regression testing.
Code: plugins/index.ts
import * as fs from 'fs';
import { PNG } from 'pngjs';
import pixelmatch from 'pixelmatch';

on('task', {
  graphicsMatch: ({ img1Path, img2Path, diffImgPath }) => {
    const img1 = PNG.sync.read(fs.readFileSync(img1Path));
    const img2 = PNG.sync.read(fs.readFileSync(img2Path));
    const { width, height } = img1;
    const diff = new PNG({ width, height });
    // pixelmatch requires the dimensions; it returns the number of differing pixels
    let pixelCount = pixelmatch(img1.data, img2.data, diff.data, width, height);
    fs.writeFileSync(diffImgPath, PNG.sync.write(diff));
    return pixelCount;
  },
});
Step definitions:
And('I capture the screenshot of {string}', (editorDocument: string) => {
  cy.wait(5000);
  cy.captureScreen(editorDocument).as('generatedFileName');
});
Then(
  'the generated pdf is as expected {string} and {string}',
  (nameOfFeatureFile: string, editorScreenshot: string) => {
    filesApi.comparePixelDiff(
      nameOfFeatureFile,
      editorScreenshot,
      Cypress.currentTest.title,
    );
  },
);
API:
comparePixelDiff(
  nameOfFeaturefile: string,
  actualFileName: string,
  expectedFileName: string,
) {
  cy.get('#generatedFileName').then((receivedFileName) => {
    const sourceFilePath = `${FilesApi.screesnShotsFolder}${FilesApi.visualTestingFeaturesFolder}/${nameOfFeaturefile}/${actualFileName}${FilesApi.graphicExt}`;
    const expectedFilePath = `${FilesApi.expectedExportsFolder}${expectedFileName}${FilesApi.graphicExt}`;
    const diffFilePath = `${FilesApi.actualExportsFolder}${actualFileName}-${FilesApi.graphicsDiffSufix}${FilesApi.graphicExt}`;
    cy.task('graphicsMatch', {
      img1Path: sourceFilePath,
      img2Path: expectedFilePath,
      diffImgPath: diffFilePath,
    }).then((pixelsDiff) => {});
  });
}
}
I am using pixelmatch here. I have set the viewport to 1280x720 in the JSON config; locally it works fine, but it fails on CI because the screenshot resolution is not consistent.
I have even tried cy.viewport(1280, 720) before capturing the screenshot, but that didn't work either.
How do I fix this issue? Please help.
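One approach worth sketching, assuming a Chromium-based browser on CI: screenshot resolution follows the browser window rather than the viewport alone, so pinning the window size (and device scale factor) at launch in the plugins file often makes CI match local runs:
on('before:browser:launch', (browser, launchOptions) => {
  // force a fixed window size so screenshots come out at the same resolution on CI
  if (browser.family === 'chromium' && browser.name !== 'electron') {
    launchOptions.args.push('--window-size=1280,720');
    launchOptions.args.push('--force-device-scale-factor=1');
  }
  return launchOptions;
});
This uses Cypress's documented before:browser:launch hook, but it is a sketch rather than a guaranteed fix; headless CI browsers frequently default to a different window size than local runs.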

How to get the path from image_cropper in Flutter?

I want to upload the cropped image to the server, but I don't know how to get the image path from the cropper. How do I get the path from the cropper? Below is my code for cropping and for uploading the cropped image.
This is my code for cropping:
void _cropImage(filePath) async {
  CroppedFile? _croppedFile = await ImageCropper().cropImage(
    sourcePath: filePath,
    aspectRatioPresets: [
      CropAspectRatioPreset.square,
      CropAspectRatioPreset.ratio3x2,
      CropAspectRatioPreset.original,
      CropAspectRatioPreset.ratio4x3,
      CropAspectRatioPreset.ratio16x9
    ],
    uiSettings: [
      AndroidUiSettings(
          toolbarTitle: 'Cropper',
          toolbarColor: Colors.deepOrange,
          toolbarWidgetColor: Colors.white,
          initAspectRatio: CropAspectRatioPreset.original,
          lockAspectRatio: false),
      IOSUiSettings(
        title: 'Cropper',
      ),
    ],
  );
  // compressFormat: ImageCompressFormat.jpg);
  // cropImagePath.value = croppedFile!.path;
  if (_croppedFile != null) {
    setState(() {
      imageFile = _croppedFile.path;
    });
  }
}
This is my code for uploading:
uploadImage() async {
  var request = http.MultipartRequest(
      'POST', Uri.parse('http://hsdgfddf/api/examples/add'));
  request.files.add(await http.MultipartFile.fromPath(
      'picture', croppedfile!.path));
  http.StreamedResponse response = await request.send();
  if (response.statusCode == 200) {
    print(await response.stream.bytesToString());
  } else {
    print(response.reasonPhrase);
  }
}
You can get the image path from the CroppedFile result once the cropper returns, at the point where you set it into state.
Hope this helps.
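Concretely, a minimal sketch reusing the imageFile variable from the _cropImage code above (assuming it is a String holding the path): store the path when the cropper returns, then feed that same variable to the upload.
if (_croppedFile != null) {
  setState(() {
    imageFile = _croppedFile.path; // this String is the path to upload later
  });
}
// later, in uploadImage():
request.files.add(await http.MultipartFile.fromPath('picture', imageFile));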
Try this:
var request = http.MultipartRequest(
  'POST',
  Uri.parse("API"),
);
Map<String, String> headers = {"Content-type": "multipart/form-data"};
request.files.add(
  http.MultipartFile(
    'pic', // picture_index
    selectedImage.readAsBytes().asStream(),
    selectedImage.lengthSync(),
    filename: croppedImage.path.split('/').last,
  ),
);
request.headers.addAll(headers);
print("request: " + request.toString());
var res = await request.send();
http.Response response = await http.Response.fromStream(res);

Cloud Functions / Firestore: how to get tokens from 2 queries

I want to send notifications to 2 different types of users when a client document is created. First, I have to search the providers collection for nearby providers who want nearby clients, and for remote providers who don't care about distance, so that I can send notifications to all of them. I tried to implement these 2 queries and send the notifications, but failed. The error message was Registration token(s) provided to sendToDevice() must be a non-empty string or a non-empty array. What is wrong with the following code?
exports.clientRegisterNotification = functions.firestore
  .document("crews/{crew}/clients/{client}")
  .onCreate(async (snapshot) => {
    try {
      const clientGeopoint = snapshot.data().g.geopoint;
      const clientField = snapshot.data().field;
      const nearCollection = geofirestore.collectionGroup("providers");
      const remoteCollection = firestore.collectionGroup("providers");
      const query1 = remoteCollection.where("wantNear", "==", 'false')
          .where("field", "==", clientField);
      const query2 = nearCollection.near({center: clientGeopoint, radius: 10})
          .where("wantNear", "==", 'true')
          .where("field", "==", clientField);
      const tokenArray = [];
      const [remotePro, nearPro] = await Promise.all([query1.get(), query2.get()]);
      remotePro.docs.forEach((doc) => {
        const token1 = doc.data().fcmToken;
        tokenArray.push(token1);
      });
      nearPro.docs.forEach((doc) => {
        const token2 = doc.data().fcmToken;
        tokenArray.push(token2);
      });
      const message = {
        "notification": {
          title: ...,
          body: ...,
        },
      };
      admin.messaging().sendToDevice(tokenArray, message);
    } catch (error) {
      console.log(error);
    }
  });
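That error fires when sendToDevice() receives an empty array or a falsy entry, so a defensive sketch (same tokenArray as above, assuming some provider documents may lack an fcmToken field) would filter before sending:
const validTokens = tokenArray.filter((t) => typeof t === 'string' && t.length > 0);
if (validTokens.length > 0) {
  await admin.messaging().sendToDevice(validTokens, message);
} else {
  console.log('No valid FCM tokens found; skipping notification.');
}
Also note that both queries compare wantNear against the strings 'false' and 'true'; if that field is stored as a boolean, neither query matches any document and tokenArray stays empty.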

How come the Drive API returns a result when using an invalid access token

My Scenario
I'm using Google Drive API to create a file and to get a list of files.
My problem
1. No matter what value I put in my access_token the API keeps working
2. If I change the order of events and I call createDriveFile before I call listDriveFiles I get this error:
Error: Invalid Credentials
at Gaxios._request (/Users/tamirklein/superquery/bd/lambda/node_modules/googleapis-common/node_modules/google-auth-library/node_modules/gaxios/src/gaxios.ts:109:15)
at
at process._tickDomainCallback (internal/process/next_tick.js:228:7)
My code
if (!global._babelPolyfill) {
  var a = require("babel-polyfill")
}
import {google} from 'googleapis'

describe('Run query with API', async () => {
  it('check Drive APIs', async () => {
    process.env.x_region = 'us-east-1';
    let result = await test('start')
  })

  async function test(p1) {
    let auth = getBasicAuthObj();
    auth.setCredentials({
      access_token: "anyValueWork",
      refresh_token: "Replace With a valid refresh Token"
    });
    let fileList = await listDriveFiles(auth);
    let newFile = await createDriveFile(auth);
  }

  async function listDriveFiles(auth) {
    return new Promise((resolved) => {
      const {google} = require('googleapis');
      const drive = google.drive({version: 'v3', auth});
      drive.files.list({
        pageSize: 10,
        fields: 'nextPageToken, files(id, name)',
        q: 'trashed=false'
      }, (err, res) => {
        if (err) {
          console.log('The API returned an error: ' + err);
          resolved([err, null]);
        } else {
          const files = res.data.files;
          if (files.length) {
            console.log(`We fetched ${files.length} Files`);
            // files.map((file) => {
            //   console.log(`${file.name} (${file.id})`);
            // });
          } else {
            console.log('No files found.');
          }
          resolved([err, res]);
        }
      });
    });
  }

  async function createDriveFile(auth) {
    return new Promise(async (resolved) => {
      //const fs = require('fs');
      const {google} = require('googleapis');
      const drive = google.drive({version: 'v3', auth});
      let data = {
        value: 'value'
      };
      let fileName = 'fileName.txt';
      let fileMetadata = {
        'name': fileName
      };
      // create buffer
      let stream = require('stream');
      let bufferStream = new stream.PassThrough();
      bufferStream.end(Buffer.from(JSON.stringify(data)));
      let media = {
        mimeType: 'application/json',
        body: bufferStream // fs.createReadStream("test.txt") //bufferStream //
      };
      drive.files.create({
        resource: fileMetadata,
        media: media,
        fields: 'id'
      }, function (err, file) {
        if (err) {
          // Handle error
          console.error("Error: savePasswordInDrive" + err);
        } else {
          console.log('File Id: ', file.data.id);
        }
        resolved([err, file]);
      });
    })
  }

  async function _wait(milliseconds) {
    return new Promise(resolved => {
      setTimeout(() => {
        resolved()
      }, milliseconds)
    })
  }

  /**
   * Create oAuth object
   * @returns {OAuth2Client}
   */
  function getBasicAuthObj() {
    let clientId = 'Replace With a valid clientId';
    let clientSecret = 'Replace With a valid clientSecret';
    let redirectUrl = 'URL';
    return new google.auth.OAuth2(
      clientId,
      clientSecret,
      redirectUrl
    )
  }
})
Any ideas on how to resolve this?
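A plausible explanation, stated as an assumption about google-auth-library behaviour rather than a confirmed diagnosis: once setCredentials() includes a valid refresh_token, the OAuth2 client can mint a fresh access token by itself, so the placeholder access_token may never be what actually authenticates the request. One way to test that hypothesis is to drop the refresh token:
auth.setCredentials({
  access_token: "anyValueWork"
  // no refresh_token here: if every call now fails with Invalid Credentials,
  // the refresh token was silently authenticating the earlier requests
});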

Spot the difference between these two images

Programmatically, my code detects a difference between two classes of images, always rejecting one class while always allowing the other.
I have yet to find any difference between the images that yield the error and the ones that don't. But there has to be some difference, because the ones that yield an error do so 100% of the time, and the others work as expected 100% of the time.
In particular, I have inspected the color format (RGB in both groups), size (no notable difference), datatype (uint8 in both), and magnitude of pixel values (similar in both).
Below are two images that never work, followed by two images that always work:
This image never works: https://www.colourbox.com/preview/11906131-maple-tree-and-grass-silhouette.jpg
This image never works: http://feldmanphoto.com/wp-content/uploads/awe-inspiring-house-clipart-black-and-white-disney-coloring-pages-big-clipartxtras-illistration-background-housewives-bouncy.jpeg
This image always works: http://www.spacedesign.us/wp-content/uploads/landscape-with-old-tree-and-grass-over-white-background-black-and-black-and-white-trees.jpg
This image always works: http://www.modernhouse.co/wp-content/uploads/2017/07/1024px-RoseSeidlerHouseSulmanPrize.jpg
How can I spot the difference?
The scenario is that I am using Firebase with a Swift iOS front end to send these images to a convnet hosted on Google Cloud ML Engine. Some images work all the time and certain others never work, as described above. Further, all images work when I use the gcloud versions predict CLI. To me the issue is necessarily something in the images, hence I am posting here for the imaging group. Code is included as requested for completeness.
Code of the index.js file is included:
'use strict';
const functions = require('firebase-functions');
const gcs = require('@google-cloud/storage');
const admin = require('firebase-admin');
const exec = require('child_process').exec;
const path = require('path');
const fs = require('fs');
const google = require('googleapis');
const sizeOf = require('image-size');

admin.initializeApp(functions.config().firebase);
const db = admin.firestore();
const rtdb = admin.database();
const dbRef = rtdb.ref();

function cmlePredict(b64img) {
  return new Promise((resolve, reject) => {
    google.auth.getApplicationDefault(function (err, authClient) {
      if (err) {
        reject(err);
      }
      if (authClient.createScopedRequired && authClient.createScopedRequired()) {
        authClient = authClient.createScoped([
          'https://www.googleapis.com/auth/cloud-platform'
        ]);
      }
      var ml = google.ml({
        version: 'v1'
      });
      const params = {
        auth: authClient,
        name: 'projects/myproject-18865/models/my_model',
        resource: {
          instances: [
            {
              "image_bytes": {
                "b64": b64img
              }
            }
          ]
        }
      };
      ml.projects.predict(params, (err, result) => {
        if (err) {
          reject(err);
        } else {
          resolve(result);
        }
      });
    });
  });
}

function resizeImg(filepath) {
  return new Promise((resolve, reject) => {
    exec(`convert ${filepath} -resize 224x ${filepath}`, (err) => {
      if (err) {
        console.error('Failed to resize image', err);
        reject(err);
      } else {
        console.log('resized image successfully');
        resolve(filepath);
      }
    });
  });
}

exports.runPrediction = functions.storage.object().onChange((event) => {
  fs.rmdir('./tmp/', (err) => {
    if (err) {
      console.log('error deleting tmp/ dir');
    }
  });
  const object = event.data;
  const fileBucket = object.bucket;
  const filePath = object.name;
  const bucket = gcs().bucket(fileBucket);
  const fileName = path.basename(filePath);
  const file = bucket.file(filePath);
  if (filePath.startsWith('images/')) {
    const destination = '/tmp/' + fileName;
    console.log('got a new image', filePath);
    return file.download({
      destination: destination
    }).then(() => {
      if (sizeOf(destination).width > 224) {
        console.log('scaling image down...');
        return resizeImg(destination);
      } else {
        return destination;
      }
    }).then(() => {
      console.log('base64 encoding image...');
      let bitmap = fs.readFileSync(destination);
      return Buffer.from(bitmap).toString('base64');
    }).then((b64string) => {
      console.log('sending image to CMLE...');
      return cmlePredict(b64string);
    }).then((result) => {
      console.log(`results just returned and is: ${result}`);
      let predict_proba = result.predictions[0]
      const res_pred_val = Object.keys(predict_proba).map(k => predict_proba[k])
      const res_val = Object.keys(result).map(k => result[k])
      const class_proba = [1 - res_pred_val, res_pred_val]
      const opera_proba_init = 1 - res_pred_val
      const capitol_proba_init = res_pred_val - 0
      // convert fraction double to percentage int
      let opera_proba = (Math.floor((opera_proba_init.toFixed(2)) * 100)) | 0
      let capitol_proba = (Math.floor((capitol_proba_init.toFixed(2)) * 100)) | 0
      let feature_list = ["houses", "trees"]
      let outlinedImgPath = '';
      let imageRef = db.collection('predicted_images').doc(filePath.slice(7));
      outlinedImgPath = `outlined_img/${filePath.slice(7)}`;
      imageRef.set({
        image_path: outlinedImgPath,
        opera_proba: opera_proba,
        capitol_proba: capitol_proba
      });
      let predRef = dbRef.child("prediction_categories");
      let arrayRef = dbRef.child("prediction_array");
      predRef.set({
        opera_proba: opera_proba,
        capitol_proba: capitol_proba,
      });
      arrayRef.set({
        first: {
          array_proba: [opera_proba, capitol_proba],
          brief_description: ["a", "b"],
          more_details: ["aaaa", "bbbb"],
          feature_list: feature_list
        },
        zummy1: "",
        zummy2: ""
      });
      return bucket.upload(destination, {destination: outlinedImgPath});
    });
  } else {
    return 'not a new image';
  }
});
The issue was that the bad images were grayscale, not RGB as expected by my model. I had initially checked this by looking at the shape, but the 'bad' images had 3 color channels in which each channel stored the same numbers, so my model was refusing to accept them. Also, contrary to what I initially thought I observed, it turns out the gcloud ML-engine predict CLI actually also failed for these images. Took me 2 days to figure this out!
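For anyone hitting the same wall, a hedged sketch of a pre-flight check that would have caught this: since the pipeline above already shells out to ImageMagick for resizing, the same binary can measure mean saturation, which is zero for grayscale content even when it is stored as 3 identical RGB channels. The isEffectivelyGrayscale helper below is hypothetical, not part of the original code:
// Hypothetical guard, assuming ImageMagick is available as in resizeImg() above:
// convert to HSL, separate the saturation channel, and check its mean.
function isEffectivelyGrayscale(filepath) {
  return new Promise((resolve, reject) => {
    exec(
      `convert ${filepath} -colorspace HSL -channel G -separate +channel -format "%[fx:mean]" info:`,
      (err, stdout) => (err ? reject(err) : resolve(parseFloat(stdout) === 0))
    );
  });
}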
