Parse Server S3 file adapter with Heroku app

I am trying to set up the S3 file adapter, but I'm not sure whether I have the formatting of something wrong. I have followed this guide exactly:
https://github.com/ParsePlatform/parse-server/wiki/Configuring-File-Adapters#configuring-s3adapter
But when I uncomment the block of code below, put in my AWS credentials, and push the setup back to Heroku, the app and dashboard won't start any longer, showing an application error:
//**** File Storage ****//
filesAdapter: new S3Adapter(
  {
    "xxxxxxxx",
    "xxxxxxxx",
    "xxxxxxxx",
    {directAccess: true}
  }
)

I would set it up as follows for Heroku:
Make sure that after performing all steps described in the guide your policy looks similar to this:
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "s3:*"
      ],
      "Resource": [
        "arn:aws:s3:::BUCKET_NAME",
        "arn:aws:s3:::BUCKET_NAME/*"
      ]
    }
  ]
}
Now apply this policy to the bucket: select your bucket in the S3 console, click the ‘Properties’ button in the top right corner, expand the ‘Permissions’ section, press ‘Edit bucket policy’, and paste the JSON above into the text field.
Configure Parse Server in the index.js file:
var S3Adapter = require('parse-server').S3Adapter;
var s3Adapter = new S3Adapter(
  "AWS_KEY",
  "AWS_SECRET_KEY",
  "bucket-name",
  { directAccess: true }
);
and add two lines to the Parse Server init (var api = new ParseServer({..})):
filesAdapter: s3Adapter,
fileKey: process.env.PARSE_FILE_KEY
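Put together, the relevant part of index.js would look roughly like the sketch below; the databaseURI, appId, masterKey and serverURL options are just the usual parse-server example settings and are assumptions here, not values taken from the question:
var api = new ParseServer({
  databaseURI: process.env.DATABASE_URI, // assumed existing settings
  appId: process.env.APP_ID,
  masterKey: process.env.MASTER_KEY,
  serverURL: process.env.SERVER_URL,
  filesAdapter: s3Adapter,               // the adapter created above
  fileKey: process.env.PARSE_FILE_KEY
});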

Similar to Cliff's post, the .S3Adapter has to be outside the parentheses of require():
var S3Adapter = require('parse-server').S3Adapter;
And then inside parse server init:
filesAdapter: new S3Adapter(
  {
    accessKey: process.env.S3_ACCESS_KEY || '',
    secretKey: process.env.S3_SECRET_KEY || '',
    bucket: process.env.S3_BUCKET || '',
    directAccess: true
  }
)
This worked in this case.
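On Heroku, the three environment variables referenced above would typically be set as config vars, for example (the values below are placeholders):
heroku config:set S3_ACCESS_KEY=xxxxxxxx S3_SECRET_KEY=xxxxxxxx S3_BUCKET=your-bucket-name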

Related

How do I access .env variables and use it inside the cypress.json file?

I have five different Cypress projects in the same repo.
The cypress.json file of each project has reporterOptions:
{
  "fixturesFolder": "./src/fixtures",
  "integrationFolder": "./src/integration",
  ...
  "reporter": "../../node_modules/mocha-testrail-reporter",
  "reporterOptions": {
    "username": "my-user-name",
    "password": "my-password",
    "host": "https://abc.testrail.io",
    "domain": "abc.testrail.io",
    "projectId": 1,
    "suiteId": 3,
    "includeAllInTestRun": true,
    "runName": "test"
  }
}
The username, host, password, and domain values are the same for all five Cypress projects. Thus, I want to put them in the .env file like this, and access these variables and use them in the cypress.json files:
USERNAME= my-user-name
PASSWORD= my-password
HOST= https://abc.testrail.io
DOMAIN= abc.testrail.io
How do I access these variables? Any help will be appreciated. Thank you.
Take a look at Extending the Cypress Config File. Cypress does not support the extends syntax in its configuration file, but it can be done in the plugins file:
module.exports = (on, config) => {
  // Load the shared values from .env into process.env
  // (this assumes the dotenv package: npm install dotenv)
  require('dotenv').config()
  // Note: dotenv does not override variables already set in the
  // environment (e.g. USERNAME is often predefined on Windows)
  const reporterOptions = {
    ...config.reporterOptions, // spread existing options
    "username": process.env.USERNAME,
    "password": process.env.PASSWORD,
    "host": process.env.HOST,
    "domain": process.env.DOMAIN,
  }
  const merged = {
    ...config,
    reporterOptions
  }
  return merged
}
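If the plugins file does not live at the default cypress/plugins/index.js location, each project's cypress.json can point at it via pluginsFile; a minimal sketch is below, where the path is an assumption to be adjusted to your repo layout:
{
  "pluginsFile": "./src/plugins/index.js",
  "fixturesFolder": "./src/fixtures",
  "integrationFolder": "./src/integration"
}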

How to deploy Next.js with GraphQL backend on Zeit Now?

I have a Next.js/Express/Apollo GraphQL app running fine on localhost.
I am trying to deploy it on Zeit Now; the Next.js part works fine, but the GraphQL backend fails because the /graphql route returns:
502: An error occurred with your deployment
Code: NO_STATUS_CODE_FROM_LAMBDA
My now.json looks like:
{
  "version": 2,
  "builds": [
    { "src": "next.config.js", "use": "@now/next" },
    { "src": "server/server.js", "use": "@now/node" }
  ],
  "routes": [
    { "src": "/api/(.*)", "dest": "server/server.js" },
    { "src": "/graphql", "dest": "server/server.js" }
  ]
}
Suggestions?
Here’s a complete example of Next.js/Apollo GraphQL running both on Zeit Now (as serverless function/lambda) and Heroku (with an Express server):
https://github.com/tomsoderlund/nextjs-pwa-graphql-sql-boilerplate
I was getting that error until I found a solution on the Wes Bos Slack channel.
The following worked for me, but it's possible you could be getting the error for a different reason.
I'm not sure why it works.
You can see it working here
cd backend
Run npm install graphql-import
Update scripts in package.json:
"deploy": "prisma deploy --env-file variables.env&& npm run writeSchema",
"writeSchema": "node src/writeSchema.js"
Note: for non-Windows users, make sure to place a space before &&.
Create src/writeSchema.js:
const fs = require('fs');
const { importSchema } = require('graphql-import');
const text = importSchema("src/generated/prisma.graphql");
fs.writeFileSync("src/schema_prep.graphql", text)
Update src/db.js:
const db = new Prisma({
typeDefs: __dirname + "/schema_prep.graphql",
...
});
Update src/createServer.js:
return new GraphQLServer({
typeDefs: __dirname + '/schema.graphql',
...
});
Update src/schema.graphql:
# import * from './schema_prep.graphql'
Create now.json
{
  "version": 2,
  "name": "Project Name",
  "builds": [
    { "src": "src/index.js", "use": "@now/node-server" }
  ],
  "routes": [
    { "src": "/.*", "dest": "src/index.js" }
  ],
  "env": {
    "SOME_VARIABLE": "xxx",
    ...
  }
}
Run npm run deploy to initially create schema_prep.graphql.
Run now
Another reply said this:
You should not mix graphql imports and js/ts imports. The syntax in the graphql file will be interpreted by graphql-import and will be ignored by ncc (the compiler which reads the __dirname stuff and moves the files to the correct directory, etc.).
In my example 'schema_prep.graphql' is already preprocessed with the imports from the generated graphql file.
Hopefully this helps.

Ionic 3 Cordova ajax calls fail on Windows 10 (UWP)

I have attempted to ask this previously, but got no real answers, and have now been struggling for over a month.
I just cannot get my ajax calls to work on an Ionic 3 Cordova application built for a Windows 10 UWP. They can access localhost, but not any outside connections.
The application works fine on both Android and iOS.
I am trying to test this locally on my dev machine. I use a certificate (bought) to sign the application, install this certificate, build the application for Windows, and am able to open the built CordovaApp.Windows10_1.0.1.1_x86.appxupload and then double-click the embedded CordovaApp.Windows10_1.0.1.1_x86.appx file to install, which completes successfully. The install indicates the app needs internet access.
In the config.xml, I have the following tags, as suggested elsewhere...
<allow-navigation href="*" />
<access origin="*" />
However, when I run, the http.get call just returns 0 with no other information. I can run in Visual Studio and look at the returned error object, but get no further info apart from this 0 return.
I have run Fiddler and enabled HTTPS decryption as explained here, but all I see in the response header is
HTTP/1.0 200 Connection Established
FiddlerGateway: Direct
StartTime: 13:44:21.686
Connection: close
The result in the main view actually shows 200, so I don't think this is showing me anything real.
I am at a complete loss and have nowhere else left to search. What could I be missing?
Should I be able to use external ajax on a Windows 10 machine, when I have sideloaded the application as here? I haven't tried from the store yet, as I don't want to upload until I know it works.
Any suggestions desperately welcomed. Surely someone has had an Ionic 3 application accessing external ajax working?
Thanks in advance for any help
[UPDATE 1]
If I run the application on the same machine, just using Ionic serve (so it just runs in the browser rather than hosted in the UWP), the ajax calls also work fine.
[UPDATE 2]
I have now created a Cordova application using the Visual Studio template, so taking all other frameworks out of the equation.
I used vanilla JavaScript to do my rest call...
document.addEventListener('deviceready', callUrl, false);
function callUrl() {
    console.log('callUrl');
    var xhr = new XMLHttpRequest();
    xhr.open('GET', 'https://myserveraddress.com/myapp/testroute');
    xhr.send(null);
    xhr.onreadystatechange = function () {
        var DONE = 4; // readyState 4 means the request is done.
        var OK = 200; // status 200 is a successful return.
        if (xhr.readyState === DONE) {
            if (xhr.status === OK)
                console.log(xhr.responseText);
        } else {
            console.log('Error: ' + xhr.status);
        }
    };
}
I run this in the debugger, and even here I get an error (status code of 0).
Another thing I noticed when I open up the build package and look at the cordova_plugins.js file..
My Ionic app has the following...
cordova.define('cordova/plugin_list', function(require, exports, module) {
module.exports = [
{
"id": "cordova-plugin-console.logger",
"file": "plugins/cordova-plugin-console/www/logger.js",
"pluginId": "cordova-plugin-console",
"clobbers": [
"cordova.logger"
]
},
{
"id": "cordova-plugin-console.console",
"file": "plugins/cordova-plugin-console/www/console-via-logger.js",
"pluginId": "cordova-plugin-console",
"clobbers": [
"console"
]
},
{
"id": "cordova-plugin-device.device",
"file": "plugins/cordova-plugin-device/www/device.js",
"pluginId": "cordova-plugin-device",
"clobbers": [
"device"
]
},
{
"id": "cordova-plugin-device.DeviceProxy",
"file": "plugins/cordova-plugin-device/src/windows/DeviceProxy.js",
"pluginId": "cordova-plugin-device",
"merges": [
""
]
},
{
"id": "cordova-plugin-splashscreen.SplashScreen",
"file": "plugins/cordova-plugin-splashscreen/www/splashscreen.js",
"pluginId": "cordova-plugin-splashscreen",
"clobbers": [
"navigator.splashscreen"
]
},
{
"id": "cordova-plugin-splashscreen.SplashScreenProxy",
"file": "plugins/cordova-plugin-splashscreen/www/windows/SplashScreenProxy.js",
"pluginId": "cordova-plugin-splashscreen",
"runs": true
},
{
"id": "cordova-plugin-statusbar.statusbar",
"file": "plugins/cordova-plugin-statusbar/www/statusbar.js",
"pluginId": "cordova-plugin-statusbar",
"clobbers": [
"window.StatusBar"
]
},
{
"id": "cordova-plugin-statusbar.StatusBarProxy",
"file": "plugins/cordova-plugin-statusbar/src/windows/StatusBarProxy.js",
"pluginId": "cordova-plugin-statusbar",
"runs": true
},
{
"id": "ionic-plugin-keyboard.KeyboardProxy",
"file": "plugins/ionic-plugin-keyboard/src/windows/KeyboardProxy.js",
"pluginId": "ionic-plugin-keyboard",
"clobbers": [
"cordova.plugins.Keyboard"
],
"runs": true
}
];
module.exports.metadata =
// TOP OF METADATA
{
"cordova-plugin-console": "1.0.5",
"cordova-plugin-device": "1.1.4",
"cordova-plugin-splashscreen": "4.0.3",
"cordova-plugin-statusbar": "2.2.2",
"cordova-plugin-whitelist": "1.3.1",
"ionic-plugin-keyboard": "2.2.1"
};
// BOTTOM OF METADATA
});
Now, I notice every plugin in the module.exports.metadata also has an entry in the module.exports EXCEPT for cordova-plugin-whitelist!
If I open the same file for the Cordova application created in VS, I see the following...
cordova.define('cordova/plugin_list', function(require, exports, module) {
module.exports = [];
module.exports.metadata =
// TOP OF METADATA
{
"cordova-plugin-whitelist": "1.2.2"
};
// BOTTOM OF METADATA
});
So this has nothing else for the whitelist plugin either.
Could there be something missing here? Could the whitelist plugin not be installed correctly?
I had a similar situation where my ajax calls worked fine in TEST, but when I moved to PROD, they would fail.
The cause was finally tracked down to a missing intermediate certificate on the server I was trying to access. TEST had the cert, PROD did not.
I hope this helps.
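If you want to check whether a server is sending the full certificate chain (including intermediates), one quick diagnostic is the openssl CLI; this is just a sketch, with the host name as a placeholder:
openssl s_client -connect myserveraddress.com:443 -showcerts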

How do I use CloudFormation resources in a Lambda function?

I have added a Redis ElastiCache section to my s-resource-cf.json (a CloudFormation template), and selected its hostname as an output.
"Resources": {
...snip...
"Redis": {
"Type": "AWS::ElastiCache::CacheCluster",
"Properties": {
"AutoMinorVersionUpgrade": "true",
"AZMode": "single-az",
"CacheNodeType": "cache.t2.micro",
"Engine": "redis",
"EngineVersion": "2.8.24",
"NumCacheNodes": "1",
"PreferredAvailabilityZone": "eu-west-1a",
"PreferredMaintenanceWindow": "tue:00:30-tue:01:30",
"CacheSubnetGroupName": {
"Ref": "cachesubnetdefault"
},
"VpcSecurityGroupIds": [
{
"Fn::GetAtt": [
"sgdefault",
"GroupId"
]
}
]
}
}
},
"Outputs": {
"IamRoleArnLambda": {
"Description": "ARN of the lambda IAM role",
"Value": {
"Fn::GetAtt": [
"IamRoleLambda",
"Arn"
]
}
},
"RedisEndpointAddress": {
"Description": "Redis server host",
"Value": {
"Fn::GetAtt": [
"Redis",
"Address"
]
}
}
}
I can get CloudFormation to output the Redis server host when running sls resources deploy, but how can I access that output from within a Lambda function?
There is nothing in this starter project template that refers to that IamRoleArnLambda, which came with the example project. According to the docs, templates are only usable for project configuration, they are not accessible from Lambda functions:
Templates & Variables are for Configuration Only
Templates and variables are used for configuration of the project only. This information is not usable in your lambda functions. To set variables which can be used by your lambda functions, use environment variables.
So, then how do I set an environment variable to the hostname of the ElastiCache server after it has been created?
You can set environment variables in the environment section of a function's s-function.json file. Furthermore, if you want to prevent those variables from being put into version control (for example, if your code will be posted to a public GitHub repo), you can put them in the appropriate files in your _meta/variables directory and then reference those from your s-function.json files. Just make sure you add a _meta line to your .gitignore file.
For example, in my latest project I needed to connect to a Redis Cloud server, but didn't want to commit the connection details to version control. I put variables into my _meta/variables/s-variables-[stage]-[region].json file, like so:
{
  "redisUrl": "...",
  "redisPort": "...",
  "redisPass": "..."
}
…and referenced the connection settings variables in that function's s-function.json file:
"environment": {
"REDIS_URL": "${redisUrl}",
"REDIS_PORT": "${redisPort}",
"REDIS_PASS": "${redisPass}"
}
I then put this redis.js file in my functions/lib directory:
module.exports = () => {
  const redis = require('redis')
  const jsonify = require('redis-jsonify')
  const redisOptions = {
    host: process.env.REDIS_URL,
    port: process.env.REDIS_PORT,
    password: process.env.REDIS_PASS
  }
  return jsonify(redis.createClient(redisOptions))
}
Then, in any function that needed to connect to that Redis database, I imported redis.js:
redis = require('../lib/redis')()
(For more details on my Serverless/Redis setup and some of the challenges I faced in getting it to work, see this question I posted yesterday.)
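For illustration, a handler using that helper might look like the sketch below; the key name and callback flow are assumptions made for the example, not part of the original project:
// functions/someFunction/handler.js (hypothetical)
const redis = require('../lib/redis')()

module.exports.handler = function (event, context) {
  // store the incoming event under a fixed key, then read it back
  redis.set('lastEvent', event, function (setErr) {
    if (setErr) return context.done(setErr)
    redis.get('lastEvent', function (getErr, value) {
      context.done(getErr, value)
    })
  })
}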
update
CloudFormation usage has been streamlined somewhat since that comment was posted in the issue tracker. I have submitted a documentation update to http://docs.serverless.com/docs/templates-variables, and posted a shortened version of my configuration in a gist.
It is possible to refer to a CloudFormation output in a s-function.json Lambda configuration file, in order to make those outputs available as environment variables.
s-resource-cf.json output section:
"Outputs": {
"redisHost": {
"Description": "Redis host URI",
"Value": {
"Fn::GetAtt": [
"RedisCluster",
"RedisEndpoint.Address"
]
}
}
}
s-function.json environment section:
"environment": {
"REDIS_HOST": "${redisHost}"
},
Usage in a Lambda function:
exports.handler = function(event, context) {
  console.log("Redis host: ", process.env.REDIS_HOST);
};
old answer
Looks like a solution was found / implemented in the Serverless issue tracker (link). To quote HyperBrain:
CF Output variables
To have your lambda access the CF output variables you have to give it the cloudformation:describeStacks access rights in the lambda IAM role.
The CF.loadVars() promise will add all CF output variables to the process'
environment as SERVERLESS_CF_OutVarName. It will add a few ms to the
startup time of your lambda.
Change your lambda handler as follows:
// Require Serverless ENV vars
var ServerlessHelpers = require('serverless-helpers-js');
ServerlessHelpers.loadEnv();

// Require Logic
var lib = require('../lib');

// Lambda Handler
module.exports.handler = function(event, context) {
  ServerlessHelpers.CF.loadVars()
    .then(function() {
      lib.respond(event, function(error, response) {
        return context.done(error, response);
      });
    });
};

The authorization mechanism you have provided is not supported. Please use AWS4-HMAC-SHA256

I get the error AWS::S3::Errors::InvalidRequest The authorization mechanism you have provided is not supported. Please use AWS4-HMAC-SHA256. when I try to upload a file to an S3 bucket in the new Frankfurt region. Everything works properly with the US Standard region.
Script:
backup_file = '/media/db-backup_for_dev/2014-10-23_02-00-07/slave_dump.sql.gz'
s3 = AWS::S3.new(
  access_key_id: AMAZONS3['access_key_id'],
  secret_access_key: AMAZONS3['secret_access_key']
)
s3_bucket = s3.buckets['test-frankfurt']
# Folder and file name
s3_name = "database-backups-last20days/#{File.basename(File.dirname(backup_file))}_#{File.basename(backup_file)}"
file_obj = s3_bucket.objects[s3_name]
file_obj.write(file: backup_file)
aws-sdk (1.56.0)
How to fix it?
Thank you.
AWS4-HMAC-SHA256, also known as Signature Version 4, ("V4") is one of two authentication schemes supported by S3.
All regions support V4, but US-Standard¹, and many -- but not all -- other regions, also support the other, older scheme, Signature Version 2 ("V2").
According to http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html ... new S3 regions deployed after January, 2014 will only support V4.
Since Frankfurt was introduced late in 2014, it does not support V2, which is what this error suggests you are using.
http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html explains how to enable V4 in the various SDKs, assuming you are using an SDK that has that capability.
I would speculate that some older versions of the SDKs might not support this option, so if the above doesn't help, you may need a newer release of the SDK you are using.
¹US Standard is the former name for the S3 regional deployment that is based in the us-east-1 region. Since the time this answer was originally written,
"Amazon S3 renamed the US Standard Region to the US East (N. Virginia) Region to be consistent with AWS regional naming conventions." For all practical purposes, it's only a change in naming.
With node, try
var s3 = new AWS.S3({
  endpoint: 's3-eu-central-1.amazonaws.com',
  signatureVersion: 'v4',
  region: 'eu-central-1'
});
You should set signatureVersion: 'v4' in the config to use the new signing version:
AWS.config.update({
  signatureVersion: 'v4'
});
Works for JS sdk.
For people using boto3 (the Python SDK), use the code below:
import boto3
from botocore.client import Config

s3 = boto3.resource(
    's3',
    aws_access_key_id='xxxxxx',
    aws_secret_access_key='xxxxxx',
    config=Config(signature_version='s3v4')
)
I have been using Django, and I had to add these extra config variables to make this work. (in addition to settings mentioned in https://simpleisbetterthancomplex.com/tutorial/2017/08/01/how-to-setup-amazon-s3-in-a-django-project.html).
AWS_S3_REGION_NAME = "ap-south-1"
Or previous to boto3 version 1.4.4:
AWS_S3_REGION_NAME = "ap-south-1"
AWS_S3_SIGNATURE_VERSION = "s3v4"
Similar issue with the PHP SDK, this works:
$s3Client = S3Client::factory(array('key'=>YOUR_AWS_KEY, 'secret'=>YOUR_AWS_SECRET, 'signature' => 'v4', 'region'=>'eu-central-1'));
The important bit is the signature and the region
AWS_S3_REGION_NAME = "ap-south-1"
AWS_S3_SIGNATURE_VERSION = "s3v4"
This also saved my time after searching for 24 hours.
Code for Flask (boto3)
Don't forget to import Config. Also, if you have your own Config class, change its name.
import boto3
from botocore.client import Config

s3 = boto3.client(
    's3',
    config=Config(signature_version='s3v4'),
    region_name=app.config["AWS_REGION"],
    aws_access_key_id=app.config['AWS_ACCESS_KEY'],
    aws_secret_access_key=app.config['AWS_SECRET_KEY']
)
s3.upload_fileobj(file, app.config["AWS_BUCKET_NAME"], file.filename)
url = s3.generate_presigned_url('get_object', Params={'Bucket': app.config["AWS_BUCKET_NAME"], 'Key': file.filename}, ExpiresIn=10000)
In Java I had to set a property
System.setProperty(SDKGlobalConfiguration.ENFORCE_S3_SIGV4_SYSTEM_PROPERTY, "true")
and add the region to the s3Client instance.
s3Client.setRegion(Region.getRegion(Regions.EU_CENTRAL_1))
With boto3, this is the code:
s3_client = boto3.resource('s3', region_name='eu-central-1')
or
s3_client = boto3.client('s3', region_name='eu-central-1')
For thumbor-aws, which used the boto config, I needed to put this into the $AWS_CONFIG_FILE:
[default]
aws_access_key_id = (your ID)
aws_secret_access_key = (your secret key)
s3 =
    signature_version = s3
So this may be useful for anything that uses boto directly without changes.
Supernova's answer for django/boto3/django-storages worked for me:
AWS_S3_REGION_NAME = "ap-south-1"
Or previous to boto3 version 1.4.4:
AWS_S3_REGION_NAME = "ap-south-1"
AWS_S3_SIGNATURE_VERSION = "s3v4"
Just add them to your settings.py and change the region code accordingly. You can check the AWS regions in the AWS documentation.
For Android SDK, setEndpoint solves the problem, although it's been deprecated.
CognitoCachingCredentialsProvider credentialsProvider = new CognitoCachingCredentialsProvider(
context, "identityPoolId", Regions.US_EAST_1);
AmazonS3 s3 = new AmazonS3Client(credentialsProvider);
s3.setEndpoint("s3.us-east-2.amazonaws.com");
Basically, the error was because I was using an old version of the aws-sdk; updating the version resolved it.
In my case with Node.js, I was using signatureVersion in the params object like this:
const AWS_S3 = new AWS.S3({
  params: {
    Bucket: process.env.AWS_S3_BUCKET,
    signatureVersion: 'v4',
    region: process.env.AWS_S3_REGION
  }
});
Then I moved signatureVersion out of the params object and it worked like a charm:
const AWS_S3 = new AWS.S3({
  params: {
    Bucket: process.env.AWS_S3_BUCKET,
    region: process.env.AWS_S3_REGION
  },
  signatureVersion: 'v4'
});
Check your AWS S3 bucket region and pass the proper region in the connection request.
In my scenario I have set 'APSouth1' for Asia Pacific (Mumbai):
using (var client = new AmazonS3Client(awsAccessKeyId, awsSecretAccessKey, RegionEndpoint.APSouth1))
{
    GetPreSignedUrlRequest request1 = new GetPreSignedUrlRequest
    {
        BucketName = bucketName,
        Key = keyName,
        Expires = DateTime.Now.AddMinutes(50),
    };
    urlString = client.GetPreSignedURL(request1);
}
In my case, the request type was wrong. I was using GET (dumb); it must be PUT.
Here is the function I used with Python:
def uploadFileToS3(filePath, s3FileName):
    s3 = boto3.client(
        's3',
        endpoint_url=settings.BUCKET_ENDPOINT_URL,
        aws_access_key_id=settings.BUCKET_ACCESS_KEY_ID,
        aws_secret_access_key=settings.BUCKET_SECRET_KEY,
        region_name=settings.BUCKET_REGION_NAME
    )
    try:
        s3.upload_file(
            filePath,
            settings.BUCKET_NAME,
            s3FileName
        )
        # remove file from local to free up space
        os.remove(filePath)
        return True
    except Exception as e:
        logger.error('uploadFileToS3#Error')
        logger.error(e)
        return False
Sometimes the default version will not update. Add this setting
AWS_S3_SIGNATURE_VERSION = "s3v4"
in settings.py
For Boto3, use this code:
import boto3
from botocore.client import Config

s3 = boto3.resource(
    's3',
    aws_access_key_id='xxxxxx',
    aws_secret_access_key='xxxxxx',
    region_name='us-south-1',
    config=Config(signature_version='s3v4')
)
Try this combination.
const s3 = new AWS.S3({
  endpoint: 's3-ap-south-1.amazonaws.com', // Bucket region
  accessKeyId: 'A-----------------U',
  secretAccessKey: 'k------ja----------------soGp',
  Bucket: 'bucket_name',
  useAccelerateEndpoint: true,
  signatureVersion: 'v4',
  region: 'ap-south-1' // Bucket region
});
I was stuck for 3 days and finally, after reading a ton of blogs and answers I was able to configure Amazon AWS S3 Bucket.
On the AWS Side
I am assuming you have already
Created an s3-bucket
Created a user in IAM
Steps
Configure CORS settings
your bucket > permissions > CORS configuration
<CORSConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
  <CORSRule>
    <AllowedOrigin>*</AllowedOrigin>
    <AllowedMethod>GET</AllowedMethod>
    <AllowedMethod>POST</AllowedMethod>
    <AllowedMethod>PUT</AllowedMethod>
    <AllowedHeader>*</AllowedHeader>
  </CORSRule>
</CORSConfiguration>
Generate a bucket policy
your bucket > permissions > bucket policy
It should be similar to this one
{
  "Version": "2012-10-17",
  "Id": "Policy1602480700663",
  "Statement": [
    {
      "Sid": "Stmt1602480694902",
      "Effect": "Allow",
      "Principal": "*",
      "Action": "s3:GetObject",
      "Resource": "arn:aws:s3:::harshit-portfolio-bucket/*"
    }
  ]
}
PS: Bucket policy should say `public` after this
Configure Access Control List
your bucket > permissions > access control list
give public access
PS: Access Control List should say public after this
Unblock public Access
your bucket > permissions > Block Public Access
Edit and turn all options Off
On a side note, if you are working on Django, add the following lines to the settings.py file of your project:
#S3 BUCKETS CONFIG
AWS_ACCESS_KEY_ID = '****not to be shared*****'
AWS_SECRET_ACCESS_KEY = '*****not to be shared******'
AWS_STORAGE_BUCKET_NAME = 'your-bucket-name'
AWS_S3_FILE_OVERWRITE = False
AWS_DEFAULT_ACL = None
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
# look for files first in aws
STATICFILES_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
# In India these settings work
AWS_S3_REGION_NAME = "ap-south-1"
AWS_S3_SIGNATURE_VERSION = "s3v4"
Also coming from: https://simpleisbetterthancomplex.com/tutorial/2017/08/01/how-to-setup-amazon-s3-in-a-django-project.html
For me this was the solution:
AWS_S3_REGION_NAME = "eu-central-1"
AWS_S3_ADDRESSING_STYLE = 'virtual'
This needs to be added to settings.py in your Django project
Using the PHP SDK, follow below.
require 'vendor/autoload.php';

use Aws\S3\S3Client;
use Aws\S3\Exception\S3Exception;

$client = S3Client::factory(
    array(
        'signature' => 'v4',
        'region' => 'me-south-1',
        'key' => YOUR_AWS_KEY,
        'secret' => YOUR_AWS_SECRET
    )
);
Nodejs
var aws = require("aws-sdk");
aws.config.update({
region: process.env.AWS_REGION,
secretAccessKey: process.env.AWS_S3_SECRET_ACCESS_KEY,
accessKeyId: process.env.AWS_S3_ACCESS_KEY_ID,
});
var s3 = new aws.S3({
signatureVersion: "v4",
});
let data = await s3.getSignedUrl("putObject", {
ContentType: mimeType, //image mime type from request
Bucket: "MybucketName",
Key: folder_name + "/" + uuidv4() + "." + mime.extension(mimeType),
Expires: 300,
});
console.log(data);
AWS S3 Bucket Permission Configuration
Deselect Block All Public Access
Add Below Policy
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Sid": "PublicReadGetObject",
      "Effect": "Allow",
      "Principal": "*",
      "Action": ["s3:GetObject"],
      "Resource": ["arn:aws:s3:::MybucketName/*"]
    }
  ]
}
Then paste the returned URL and make a PUT request to the URL with the binary file of the image.
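For completeness, a minimal sketch of that PUT request from the browser, assuming presignedUrl is the URL returned above and file is a File/Blob taken from an <input type="file">:
// hypothetical client-side upload of the binary file to the presigned URL
async function uploadToS3(presignedUrl, file) {
  const response = await fetch(presignedUrl, {
    method: 'PUT',
    headers: { 'Content-Type': file.type }, // must match the ContentType used when signing
    body: file
  });
  if (!response.ok) {
    throw new Error('Upload failed with status ' + response.status);
  }
  return response;
}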
Full working nodejs version:
const AWS = require('aws-sdk');

var s3 = new AWS.S3({
  endpoint: 's3.eu-west-2.amazonaws.com',
  signatureVersion: 'v4',
  region: 'eu-west-2'
});

const getPreSignedUrl = async () => {
  const params = {
    Bucket: 'some-bucket-name/some-folder',
    Key: 'some-filename.json',
    Expires: 60 * 60 * 24 * 7
  };
  try {
    const presignedUrl = await new Promise((resolve, reject) => {
      s3.getSignedUrl('getObject', params, (err, url) => {
        err ? reject(err) : resolve(url);
      });
    });
    console.log(presignedUrl);
  } catch (err) {
    if (err) {
      console.log(err);
    }
  }
};

getPreSignedUrl();
