I am trying to write an API for uploading and downloading files. After I uploaded a test file, it was not shown in Cyberduck, but I can download the test file that I just uploaded.
Then I tried to download a file that exists in Cyberduck, but it throws:
com.emc.object.s3.S3Exception: The specified key does not exist.
API code:
StorageImpl.java
@Service
public class StorageImpl implements Storage {
private static Logger logger = LoggerFactory.getLogger(Storage.class);
#Value("${storage.file.repository}")
private String fileRepository;
#Value("${object.storage.user}")
private String oUser;
#Value("${object.storage.endpoint}")
private String endpoint;
#Value("${object.storage.bucket}")
private String nBucket;
#Value("${object.storage.key.secret}")
private String nSecret;
#Value("${object.storage.region.name}")
private String nRegion;
private S3Client s3client;
public StorageImpl() {
}
@Inject
void init() {
try {
s3client = this.getS3Client();
} catch (Exception e) {
e.printStackTrace();
}
}
private S3Client getS3Client() {
if (s3client == null) {
try {
SSLContext sc = SSLContext.getInstance("TLSv1.2");
sc.init(null, null, new java.security.SecureRandom());
HttpsURLConnection.setDefaultSSLSocketFactory(new PreferredCipherSuiteSSLSocketFactory(sc.getSocketFactory()));
String hostname = "";
URI uri = null;
try {
uri = new URI(endpoint);
hostname = uri.getHost();
} catch (URISyntaxException e) {
logger.error("URL " + endpoint + " is a malformed URL");
e.printStackTrace();
}
S3Config config = new S3Config(uri).withUseVHost(false);
logger.debug("oUser=" + oUser + ", secret=" + nSecret + ", endpoint=" + endpoint);
config.withIdentity(oUser).withSecretKey(nSecret);
logger.debug("");
config.setSignMetadataSearch(true);
s3client = new S3JerseyClient(config, new URLConnectionClientHandler());
logger.debug("s3client initiated. endpoint: " + endpoint);
} catch (Exception ex) {
logger.error(ex.getMessage(), ex);
}
}
return s3client;
}
private File convertMultiPartFileToFile(final MultipartFile multipartFile) {
final File file = new File(multipartFile.getOriginalFilename());
try (final FileOutputStream outputStream = new FileOutputStream(file)) {
outputStream.write(multipartFile.getBytes());
} catch (IOException e) {
logger.error("Error {} occurred while converting the multipart file", e.getLocalizedMessage());
}
return file;
}
@Override
public void save(final MultipartFile multipartFile) {
try {
final File file = convertMultiPartFileToFile(multipartFile);
logger.info("Uploading file with name {}", file.getName());
final PutObjectRequest putObjectRequest = new PutObjectRequest(nBucket, file.getName(), file);
s3client.putObject(putObjectRequest);
Files.delete(file.toPath()); // Remove the file locally created in the project folder
} catch (AmazonServiceException e) {
logger.error("Error {} occurred while uploading file", e.getLocalizedMessage());
} catch (IOException ex) {
logger.error("Error {} occurred while deleting temporary file", ex.getLocalizedMessage());
} catch (S3Exception ae) {
ae.printStackTrace();
logger.error("", ae);
}
}
@Override
public InputStream retrieve(String fileName) {
return s3client.getObject(nBucket, fileName).getObject();
}
}
FileController.java
@RestController
@RequestMapping("/files")
@CrossOrigin(origins = "*", maxAge = 3600)
public class FileController {
private static final String MESSAGE_1 = "Uploaded the file successfully";
private static final String FILE_NAME = "fileName";
@Autowired
protected StorageImpl storageImpl;
@Autowired
protected FileService fileService;
@GetMapping
public ResponseEntity<Object> findByName(@RequestParam("fileName") String fileName) {
return ResponseEntity
.ok()
.cacheControl(CacheControl.noCache())
.header("Content-type", "application/octet-stream")
.header("Content-disposition", "attachment; filename=\"" + fileName + "\"")
.body(new InputStreamResource(storageImpl.retrieve(fileName)));
}
@PostMapping
public ResponseEntity<Object> save(@RequestParam("file") MultipartFile multipartFile) {
storageImpl.save(multipartFile);
return new ResponseEntity<>(MESSAGE_1, HttpStatus.OK);
}
}
What are the possible reasons causing this bug?
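One way to narrow this down is to dump the keys that actually ended up in the bucket the API writes to and compare them with what Cyberduck displays; a different bucket or an unexpected key prefix would produce exactly these symptoms. Below is a small diagnostic sketch that could be added to StorageImpl. It assumes the ECS client exposes listObjects(bucket) with ListObjectsResult/S3Object beans mirroring the S3 API; verify this against the ECS SDK version you use.
// Diagnostic sketch (assumption: the ECS S3 client exposes listObjects(bucket)
// returning ListObjectsResult with the object keys, as in the AWS S3 API).
public void logBucketKeys() {
    ListObjectsResult result = s3client.listObjects(nBucket);
    for (S3Object object : result.getObjects()) {
        // Compare these keys with the names/paths shown in Cyberduck
        logger.info("Key in bucket {}: {}", nBucket, object.getKey());
    }
}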
Related
We are using Spring SFTP (outbound) with a gateway to transfer files to multiple destinations. But often, a few files are sent to the wrong destination. We can't find any clue, as we don't get any error in our logs except a file-count error after sending files.
Here is our configuration:
@Configuration
public class BankWiseSFTPConfig {
private final ExpressionParser EXPRESSION_PARSER;
private final BankConfigService bankConfigService;
public BankWiseSFTPConfig(BankConfigService bankConfigService) {
this.EXPRESSION_PARSER = new SpelExpressionParser();
this.bankConfigService = bankConfigService;
}
@Bean
public DelegatingSessionFactory<LsEntry> sessionFactory() {
List<BankConfigEntity> bankList = bankConfigService.getAll();
Map<Object, SessionFactory<LsEntry>> factories = new LinkedHashMap<>();
for (BankConfigEntity bank : bankList) {
DefaultSftpSessionFactory factory = new DefaultSftpSessionFactory();
factory.setHost(bank.getSftpHost());
factory.setUser(bank.getSftpUser());
factory.setPort(bank.getSftpPort());
factory.setPassword(bank.getSftpPass());
factory.setAllowUnknownKeys(true);
factories.put(bank.getBankName(), factory);
}
bankList.clear();
return new DelegatingSessionFactory<LsEntry>(factories, factories.values().iterator().next());
}
#ServiceActivator(inputChannel = "toSftp")
#Bean
public SftpMessageHandler handler() {
SftpMessageHandler handler = new SftpMessageHandler(new SftpRemoteFileTemplate(sessionFactory()));
handler.setRemoteDirectoryExpression(EXPRESSION_PARSER.parseExpression("headers['path']"));
return handler;
}
@MessagingGateway
public interface SFTPOutboundGateway {
@Gateway(requestChannel = "toSftp")
void push(File file, @Header("path") String path);
@Gateway(requestChannel = "sftpChannel")
List<String> executeCommand(String path);
}
@Bean
@ServiceActivator(inputChannel = "sftpChannel")
public MessageHandler messageHandlerLs() {
SftpOutboundGateway sftpOutboundGateway = new SftpOutboundGateway(sessionFactory(), "ls", "payload");
sftpOutboundGateway.setOptions("-1 -R");
return sftpOutboundGateway;
}
}
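For reference, DelegatingSessionFactory resolves the delegate from a key bound to the current thread, falling back to the default factory passed to its constructor when no key is set. A minimal sketch of binding and clearing that key around a push, assuming DelegatingSessionFactory's setThreadKey(Object)/clearThreadKey() methods; the wiring below is illustrative, not our actual code:
// Sketch: select the per-bank factory before sending and always unbind afterwards.
// Assumes setThreadKey(Object) and clearThreadKey() on DelegatingSessionFactory.
public void pushForBank(DelegatingSessionFactory<LsEntry> sessionFactory,
                        SFTPOutboundGateway gateway,
                        String bankName, File file, String path) {
    sessionFactory.setThreadKey(bankName); // key used when the factories map was built
    try {
        gateway.push(file, path);
    } finally {
        sessionFactory.clearThreadKey();   // leave the thread pointing at the default factory again
    }
}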
Here are our push and file count methods:
private void pushReport(String bankName,
String destinationPath,
String sourcePath,
String refundType,
List<BankReportEntity> failedBankReportEntities,
List<BankReportEntity> pushedFiles,
BankReportEntity bankReportEntity) {
String sftpStatus = SlotBankStatus.BANK_SFTP_INITIATED.name();
String errorReason = StringUtils.EMPTY;
String fileName = bankReportEntity.getFileName();
String filePath = sourcePath + fileName;
File file = new File(filePath);
bankReportEntity.setSftpStatus(sftpStatus);
log.debug("{} :: SFTP Push Initiated for {} and File {}", refundType, bankName, fileName);
try {
log.info("{} :: SFTP Push trying for {} and {}", refundType, bankName, file);
gateway.push(file, destinationPath);
sftpStatus = SlotBankStatus.BANK_SFTP_COMPLETED.name();
pushedFiles.add(bankReportEntity);
log.info("{} :: SFTP Push success for {} and {}", refundType, bankName, file);
} catch (Exception e) {
emailService.sendSFTPExceptionEmail(
"File push error for file : " + fileName +
" and FileTransferType " + bankReportEntity.getFileTransferType() +
". Error : " + e.getLocalizedMessage(),
bankName);
sftpStatus = SlotBankStatus.BANK_SFTP_PENDING.name();
errorReason = ErrorCode.SFTP_PUSH_FAILED.name();
failedBankReportEntities.add(bankReportEntity);
log.error("{} :: File push error for file : {}, Bank {}, FileTransferType {}, Error : {}",
refundType,
fileName,
bankName,
bankReportEntity.getFileTransferType(),
e.getMessage(),
e
);
} finally {
log.info("{} :: SFTP to {} Status Updated for : {}", refundType, bankName, bankReportEntity);
bankReportEntity.setSftpStatus(sftpStatus);
bankReportEntity.setErrorReason(errorReason);
}
}
private SFTPPushFileCountDto getSFTPSuccessfulFileCount(
String bankName,
String path,
String refundType,
List<BankReportEntity> pushedFiles,
List<BankReportEntity> failedBankReports) {
int totalSuccessfulPush = pushedFiles.size();
int totalFailedPush = failedBankReports.size();
log.info("{} :: getSFTPSuccessfulFileCount() for {}, from {}", refundType, bankName, path);
try {
List<String> remoteFiles = gateway.executeCommand(path);
for (Iterator<BankReportEntity> pushedFilesIterator = pushedFiles.iterator(); pushedFilesIterator.hasNext(); ) {
BankReportEntity bankReport = pushedFilesIterator.next();
String fileName = bankReport.getFileName();
if (!remoteFiles.contains(fileName)) {
log.error("getSFTPSuccessfulFileCount() : File not found in remote {}. File: {}", path, fileName);
totalFailedPush++;
totalSuccessfulPush--;
bankReport.setSftpStatus(SlotBankStatus.BANK_SFTP_PENDING.name());
bankReport.setErrorReason(ErrorCode.UNKNOWN_ERROR_CODE.name());
pushedFilesIterator.remove();
failedBankReports.add(bankReport);
emailService.sendSFTPExceptionEmail(
"File push error for file : " + fileName +
" and FileTransferType " + bankReport.getFileTransferType() +
". Error : " + ErrorCode.UNKNOWN_ERROR_CODE.description(),
bankName);
}
}
} catch (Exception ex) {
emailService.sendSFTPExceptionEmail("SFTP file count Failed from path " + path, bankName);
log.error("{} :: getSFTPSuccessfulFileCount() Failed for {}. Error : {}",
refundType,
bankName,
ex.getMessage(),
ex);
}
return SFTPPushFileCountDto.builder()
.totalSuccessfulPush(totalSuccessfulPush)
.totalFailedPush(totalFailedPush)
.build();
}
We can't reproduce the problem in our environment.
Can anybody help?
Basically I have a Spring Batch job that queries a database and implements Partitioner to get the partitions, which are assigned to a ThreadPoolTaskExecutor in a slave step.
The reader reads each partition from the database. The writer loads the data into a CSV file in Azure Blob Storage.
The partitioner and reader work fine. The writer writes to one file, then closes the stream, and the other partitions cannot finish because the stream is closed. I get the following error:
Reading: market1
Reading: market2
Reading: market3
Reading: market4
Reading: market5
Writter: /upload-demo/market3_2021-06-01.csv
Writter: /upload-demo/market5_2021-06-01.csv
Writter: /upload-demo/market4_63_2021-06-01.csv
Writter: /upload-demo/market2_2021-06-01.csv
Writter: /upload-demo/market1_11_2021-06-01.csv
2021-06-02 08:24:42.304 ERROR 20356 --- [ taskExecutor-3] c.a.storage.common.StorageOutputStream : Stream is already closed.
2021-06-02 08:24:42.307 WARN 20356 --- [ taskExecutor-3] o.s.b.f.support.DisposableBeanAdapter : Destroy method 'close' on bean with name 'scopedTarget.writer2' threw an exception: java.lang.RuntimeException: Stream is already closed.
Reading: market6
Writter: /upload-demo/market6_2021-06-01.csv
Here is my Batch Configuration:
@EnableBatchProcessing
@Configuration
public class BatchConfig extends DefaultBatchConfigurer {
String connectionString = "azureConnectionString";
String containerName = "upload-demo";
String endpoint = "azureHttpsEndpoint";
String accountName ="azureAccountName";
String accountKey = "accountKey";
StorageSharedKeyCredential credential = new StorageSharedKeyCredential(accountName, accountKey);
BlobServiceClient client = new BlobServiceClientBuilder().connectionString(connectionString).endpoint(endpoint).buildClient();
@Autowired
private StepBuilderFactory steps;
@Autowired
private JobBuilderFactory jobs;
@Autowired
@Qualifier("verticaDb")
private DataSource verticaDataSource;
@Autowired
private PlatformTransactionManager transactionManager;
@Autowired
private ConsoleItemWriter consoleItemWriter;
@Autowired
private ItemWriter itemWriter;
@Bean
public Job job() throws Exception {
return jobs.get("job1")
.start(masterStep(null, null))
.incrementer(new RunIdIncrementer())
.build();
}
@Bean
public ThreadPoolTaskExecutor taskExecutor() {
ThreadPoolTaskExecutor taskExecutor = new ThreadPoolTaskExecutor();
taskExecutor.setCorePoolSize(5);
taskExecutor.setMaxPoolSize(10);
taskExecutor.initialize();
return taskExecutor;
}
@Bean
@JobScope
public Step masterStep(@Value("#{jobParameters['startDate']}") String startDate,
@Value("#{jobParameters['endDate']}") String endDate) throws Exception {
return steps.get("masterStep")
.partitioner(slaveStep().getName(), new RangePartitioner(verticaDataSource, startDate, endDate))
.step(slaveStep())
.gridSize(5)
.taskExecutor(taskExecutor())
.build();
}
@Bean
public Step slaveStep() throws Exception {
return steps.get("slaveStep")
.<MarketData, MarketData>chunk(100)
.reader(pagingItemReader(null, null, null))
.faultTolerant()
.skip(NullPointerException.class)
.skipPolicy(new AlwaysSkipItemSkipPolicy())
.writer(writer2(null, null, null)) //consoleItemWriter
.build();
}
@Bean
@StepScope
public JdbcPagingItemReader pagingItemReader(
@Value("#{stepExecutionContext['MarketName']}") String marketName,
@Value("#{jobParameters['startDate']}") String startDate,
@Value("#{jobParameters['endDate']}") String endDate
) throws Exception {
System.out.println("Reading: " + marketName);
SqlPagingQueryProviderFactoryBean provider = new SqlPagingQueryProviderFactoryBean();
Map<String, Order> sortKey = new HashMap<>();
sortKey.put("xbin", Order.ASCENDING);
sortKey.put("ybin", Order.ASCENDING);
provider.setDataSource(this.verticaDataSource);
provider.setDatabaseType("POSTGRES");
provider.setSelectClause("SELECT MARKET AS market, EPSG AS epsg, XBIN AS xbin, YBIN AS ybin, " +
"LATITUDE AS latitude, LONGITUDE AS longitude, " +
"SUM(TOTALUPLINKVOLUME) AS totalDownlinkVol, SUM(TOTALDOWNLINKVOLUME) AS totalUplinkVol");
provider.setFromClause("FROM views.geo_analytics");
provider.setWhereClause(
"WHERE market='" + marketName + "'" +
" AND STARTTIME >= '" + startDate + "'" +
" AND STARTTIME < '" + endDate + "'" +
" AND TOTALUPLINKVOLUME IS NOT NULL" +
" AND TOTALUPLINKVOLUME > 0" +
" AND TOTALDOWNLINKVOLUME IS NOT NULL" +
" AND TOTALDOWNLINKVOLUME > 0" +
" AND EPSG IS NOT NULL" +
" AND LATITUDE IS NOT NULL" +
" AND LONGITUDE IS NOT NULL" +
" AND XBIN IS NOT NULL" +
" AND YBIN IS NOT NULL"
);
provider.setGroupClause("GROUP BY XBIN, YBIN, MARKET, EPSG, LATITUDE, LONGITUDE");
provider.setSortKeys(sortKey);
JdbcPagingItemReader reader = new JdbcPagingItemReader();
reader.setDataSource(this.verticaDataSource);
reader.setQueryProvider(provider.getObject());
reader.setFetchSize(1000);
reader.setRowMapper(new BeanPropertyRowMapper() {
{
setMappedClass((MarketData.class));
}
});
return reader;
}
@Bean
@StepScope
public FlatFileItemWriter<MarketData> writer2(@Value("#{jobParameters['yearMonth']}") String yearMonth,
@Value("#{stepExecutionContext['marketName']}") String marketName,
@Value("#{jobParameters['startDate']}") String startDate) throws URISyntaxException, InvalidKeyException, StorageException, IOException {
AZBlobWriter<MarketData> writer = new AZBlobWriter<>();
String fullPath =marketName + "_" + startDate + ".csv";
String resourceString = "azure-blob://upload-demo/" + fullPath;
CloudStorageAccount storageAccount = CloudStorageAccount.parse(connectionString);
CloudBlobClient blobClient = storageAccount.createCloudBlobClient();
CloudBlobContainer container2 = blobClient.getContainerReference(containerName);
container2.createIfNotExists();
AzureStorageResourcePatternResolver storageResourcePatternResolver = new AzureStorageResourcePatternResolver(client);
Resource resource = storageResourcePatternResolver.getResource(resourceString);
System.out.println("Writter: " + resource.getURI().getPath().toString());
writer.setResource(resource);
writer.setStorage(container2);
writer.setLineAggregator(new DelimitedLineAggregator<MarketData>() {
{
setDelimiter(",");
setFieldExtractor(new BeanWrapperFieldExtractor<MarketData>() {
{
setNames(new String[] {
"market",
"epsg",
"xbin",
"ybin",
"latitude",
"longitude",
"totalDownlinkVol",
"totalUplinkVol"
});
}
});
}
});
return writer;
}
}
Previously I ran into other issues, such as setting up the Resource for the FlatFileItemWriter to Azure Blob: Spring Batch / Azure Storage account blob resource [container"foo", blob='bar'] cannot be resolved to absolute file path.
As suggested by @Mahmoud Ben Hassine, I made an implementation of the FlatFileItemWriter for Azure Blob.
The implementation I used as a base (for GCP) is from this post: how to configure FlatFileItemWriter to output the file to a ByteArrayRecource?
Here is the implementation of the Azure Blob:
public class AZBlobWriter<T> extends FlatFileItemWriter<T> {
private CloudBlobContainer storage;
private Resource resource;
private static final String DEFAULT_LINE_SEPARATOR = System.getProperty("line.separator");
private OutputStream os;
private String lineSeparator = DEFAULT_LINE_SEPARATOR;
@Override
public void write(List<? extends T> items) throws Exception {
StringBuilder lines = new StringBuilder();
for (T item : items) {
lines.append(item).append(lineSeparator);
}
byte[] bytes = lines.toString().getBytes();
try {
os.write(bytes);
}
catch (IOException e) {
throw new WriteFailedException("Could not write data. The file may be corrupt.", e);
}
os.flush();
}
@Override
public void open(ExecutionContext executionContext) {
try {
os = ((WritableResource)resource).getOutputStream();
String bucket = resource.getURI().getHost();
String filePath = resource.getURI().getPath().substring(1);
CloudBlockBlob blob = storage.getBlockBlobReference(filePath);
} catch (IOException e) {
e.printStackTrace();
} catch (StorageException e) {
e.printStackTrace();
} catch (URISyntaxException e) {
e.printStackTrace();
}
}
@Override
public void update(ExecutionContext executionContext) {
}
@Override
public void close() {
super.close();
try {
os.close();
} catch (IOException e) {
e.printStackTrace();
}
}
public void setStorage(CloudBlobContainer storage) {
this.storage = storage;
}
@Override
public void setResource(Resource resource) {
this.resource = resource;
}
}
Any help is greatly appreciated. My apologies for the "dirty code", as I am still testing/developing it.
Thanks, Markus.
You did not share the entire stack trace to see when this error happens exactly, but it seems that the close method is called more than once. I think this is not due to a concurrency issue, as I see you are using one writer per thread in a partitioned step. So I would make this method "re-entrant" by checking whether the output stream is already closed before closing it (there is no isClosed method on an output stream, so you can use a custom boolean flag around that).
That said, I would first confirm that the close method is called twice and, if so, investigate why that is and fix the root cause.
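For illustration, a minimal sketch of such a re-entrant close in the AZBlobWriter above (the boolean flag is an addition, not part of the original class):
// Guard so a second close() call (for example from bean destruction) is a no-op.
private boolean streamClosed = false;

@Override
public void close() {
    super.close();
    if (streamClosed) {
        return; // stream was already closed by an earlier call
    }
    try {
        os.close();
    } catch (IOException e) {
        throw new ItemStreamException("Failed to close the output stream", e);
    } finally {
        streamClosed = true;
    }
}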
In the edit section, I wrote how to make it upload the image
Thank you very much
#PostMapping("/save")
public String add(#ModelAttribute("category") Category category, RedirectAttributes ra,
#RequestParam("fileImage") MultipartFile multipartFile) throws IOException {
String fileName = StringUtils.cleanPath(multipartFile.getOriginalFilename());
category.setPhoto(fileName);
Category saveCategory = categoryService.save(category);
String uploadDir = "./category-logos/" + saveCategory.getId();
Path uploadPath = Paths.get(uploadDir);
if (!Files.exists(uploadPath)) {
Files.createDirectories(uploadPath);
}
try (InputStream inputStream = multipartFile.getInputStream()) {
Path filePath = uploadPath.resolve(fileName);
Files.copy(inputStream, filePath, StandardCopyOption.REPLACE_EXISTING);
} catch (IOException e) {
throw new IOException("could not save upload file: " + fileName);
}
return "redirect:/category/list";
}
#GetMapping("/edit/{id}")
public String edit(Model model, #PathVariable(name="id")Long id) {
//`**`**`**enter code here**`**`**`
}
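For reference, a minimal sketch of what the edit mapping could look like, assuming the categoryService used in the save mapping exposes a lookup by id (the get method and the view name are assumptions):
@GetMapping("/edit/{id}")
public String edit(Model model, @PathVariable(name = "id") Long id) {
    // Load the category so the form (including the stored photo name) can be pre-filled
    Category category = categoryService.get(id); // assumed lookup method
    model.addAttribute("category", category);
    // The template can build the image URL from the id and photo, e.g. /category-logos/{id}/{photo}
    return "category_form"; // assumed view name
}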
I want to upload a file to a remote server; currently I am only able to upload to the local machine. Below is my code:
#PostMapping("/upload")
public UploadFileResponse uploadFile(#RequestParam("file") MultipartFile file) {
String fileName = fileStorageService.storeFile(file);
String fileDownloadUri = ServletUriComponentsBuilder.fromCurrentContextPath()
.path("/downloadFile/")
.path(fileName)
.toUriString();
return new UploadFileResponse(fileName, fileDownloadUri,file.getContentType(), file.getSize());
}
file.upload-dir=C:\\Test
Thanks in Advance!
EDIT:
1. Use case: you want to upload the file locally (i.e., where your application is running).
You create a StorageService interface and an implementing class FileSystemStorageService:
@Service
public class FileSystemStorageService implements StorageService {
private final Path rootLocation;
@Autowired
public FileSystemStorageService(StorageProperties properties) {
this.rootLocation = Paths.get(properties.getLocation());
}
@Override
public void store(MultipartFile file) {
String filename = StringUtils.cleanPath(file.getOriginalFilename());
try {
if (file.isEmpty()) {
throw new StorageException("Failed to store empty file " + filename);
}
if (filename.contains("..")) {
// This is a security check
throw new StorageException(
"Cannot store file with relative path outside current directory "
+ filename);
}
try (InputStream inputStream = file.getInputStream()) {
Files.copy(inputStream, this.rootLocation.resolve(filename),
StandardCopyOption.REPLACE_EXISTING);
}
}
catch (IOException e) {
throw new StorageException("Failed to store file " + filename, e);
}
}
And the controller class:
@Controller
public class FileUploadController {
private final StorageService storageService;
@Autowired
public FileUploadController(StorageService storageService) {
this.storageService = storageService;
}
@PostMapping("/")
public String handleFileUpload(@RequestParam("file") MultipartFile file,
RedirectAttributes redirectAttributes) {
storageService.store(file);
redirectAttributes.addFlashAttribute("message",
"You successfully uploaded " + file.getOriginalFilename() + "!");
return "redirect:/";
}
You can find the whole sample under https://github.com/spring-guides/gs-uploading-files.
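The StorageProperties class used by the constructor above comes from that sample; roughly, it just binds the upload directory (a sketch is shown here, property names may differ in your setup):
@ConfigurationProperties("storage")
public class StorageProperties {

    // Folder location for storing uploaded files
    private String location = "upload-dir";

    public String getLocation() {
        return location;
    }

    public void setLocation(String location) {
        this.location = location;
    }
}
You also need to enable the binding, e.g. with @EnableConfigurationProperties(StorageProperties.class) on the application class, as the sample does.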
2. Use case: you want to upload the file to a remote server.
In this case I recommend using SFTP.
You create a RemoteFileSystemStorageService implementing the StorageService interface (already created in the first use case).
@Service
public class RemoteFileSystemStorageService implements StorageService {
@Autowired
private StorageProperties properties;
private ChannelSftp channelSftp; // assigned in setUpSsh(), so it cannot be final
@PostConstruct
public void setUpSsh() throws JSchException {
JSch jsch = new JSch();
Session jschSession = jsch.getSession(properties.getUsername(),
properties.getRemoteHost());
jschSession.setPassword(properties.getPassword());
jschSession.connect();
this.channelSftp = (ChannelSftp) jschSession.openChannel("sftp");
}
@Override
public void store(MultipartFile file) {
String filename = StringUtils.cleanPath(file.getOriginalFilename());
try {
if (file.isEmpty()) {
throw new StorageException("Failed to store empty file " + filename);
}
if (filename.contains("..")) {
// This is a security check
throw new StorageException(
"Cannot store file with relative path outside current directory "
+ filename);
}
try (InputStream inputStream = file.getInputStream()) {
this.channelSftp.connect();
// put() expects a destination file path, so append the file name to the remote directory
this.channelSftp.put(inputStream, properties.getRemoteServerDirectory() + "/" + filename);
}
}
catch (IOException | JSchException | SftpException e) {
throw new StorageException("Failed to store file " + filename, e);
}
finally {
this.channelSftp.disconnect();
}
}
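The getters used above (getUsername(), getRemoteHost(), getPassword(), getRemoteServerDirectory()) are not part of the local-storage StorageProperties, so you would extend it with the SFTP settings. A sketch, with property names chosen purely for illustration:
@ConfigurationProperties("storage")
public class StorageProperties {

    // SFTP connection settings (names are illustrative)
    private String username;
    private String password;
    private String remoteHost;
    private String remoteServerDirectory;

    public String getUsername() { return username; }
    public void setUsername(String username) { this.username = username; }

    public String getPassword() { return password; }
    public void setPassword(String password) { this.password = password; }

    public String getRemoteHost() { return remoteHost; }
    public void setRemoteHost(String remoteHost) { this.remoteHost = remoteHost; }

    public String getRemoteServerDirectory() { return remoteServerDirectory; }
    public void setRemoteServerDirectory(String remoteServerDirectory) { this.remoteServerDirectory = remoteServerDirectory; }
}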
I am trying to use Postman to test one of the POST requests I created for my Spring Boot application. My POST requests through Postman always return 404.
I have created the same mapping route for a GET request, and with Postman the GET request works as expected.
I have tested with the AWS CLI and made sure that I have the correct access key and secret key for uploading files to S3.
Code for my service:
@Service
public class AmazonClient {
private AmazonS3 s3client;
@Value("${amazonProperties.endpointUrl}")
private String endpointUrl;
@Value("${amazonProperties.bucketName}")
private String bucketName;
@Value("${amazonProperties.accessKey}")
private String accessKey;
@Value("${amazonProperties.secretKey}")
private String secretKey;
@PostConstruct
private void initializeAmazon() {
AWSCredentials credentials = new BasicAWSCredentials(this.accessKey, this.secretKey);
this.s3client = AmazonS3ClientBuilder.standard().withRegion(Regions.US_EAST_2).withCredentials(
new AWSStaticCredentialsProvider(credentials)).build();
}
@Async
public String uploadFile(MultipartFile multipartFile, boolean enablePublicReadAccess) {
String fileUrl = "";
System.out.println("Reach");
try {
File file = convertMultiPartToFile(multipartFile);
String fileName = generateFileName(multipartFile);
System.out.println("FileName: " + fileName);
fileUrl = endpointUrl + "/" + bucketName + "/" + fileName;
PutObjectRequest putObjectRequest = new PutObjectRequest(this.bucketName, fileName, file);
if (enablePublicReadAccess) {
putObjectRequest.withCannedAcl(CannedAccessControlList.PublicRead);
}
s3client.putObject(putObjectRequest);
file.delete();
} catch (Exception e) {
e.printStackTrace();
}
return fileUrl;
}
private File convertMultiPartToFile(MultipartFile file) throws IOException {
File convFile = new File(file.getOriginalFilename());
FileOutputStream fos = new FileOutputStream(convFile);
fos.write(file.getBytes());
fos.close();
return convFile;
}
private String generateFileName(MultipartFile multiPart) {
return new Date().getTime() + "-" + multiPart.getOriginalFilename().replace(" ", "_");
}
public String deleteFileFromS3Bucket(String fileUrl) {
String fileName = fileUrl.substring(fileUrl.lastIndexOf("/") + 1);
s3client.deleteObject(new DeleteObjectRequest(bucketName, fileName));
return "Successfully deleted";
}
}
Code for my controller:
@RestController
@RequestMapping("/storage/files")
public class BucketController {
private AmazonClient amazonClient;
@Autowired
BucketController(AmazonClient amazonClient) {
this.amazonClient = amazonClient;
}
@GetMapping
public String getFile(){
return "Files";
}
#PostMapping("/file")
public String file() {
return "Reach!";
}
@PostMapping
public String uploadFile(@RequestPart(value = "file") MultipartFile file) {
System.out.println("Reach!!");
return this.amazonClient.uploadFile(file, true);
}
@DeleteMapping
public String deleteFile(@RequestPart(value = "url") String fileUrl) {
return this.amazonClient.deleteFileFromS3Bucket(fileUrl);
}
}
My security config:
@Override
protected void configure(HttpSecurity http) throws Exception {
http.authorizeRequests().antMatchers("/css/**", "/js/**", "/fonts/**", "/index").permitAll()
.antMatchers("/storage*").permitAll();
In Postman, I selected a POST request to http://localhost:8080/storage/files/file; in the body, I entered a key "file", set the value type to File, and chose a file from my local machine.
Here is the response:
{
"timestamp": "2019-09-02T19:09:54.864+0000",
"status": 404,
"error": "Not Found",
"message": "No message available",
"path": "/storage/files/file"
}
Project Structure
Postman Results
This is almost certainly your security config interfering.
Have you tried: .antMatchers("/storage/**") instead?
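If that is the cause, the fix is just the broader pattern in the configure method shown above; "/storage*" only matches paths like /storage or /storageX, while "/storage/**" also matches /storage/files and /storage/files/file. A sketch of the adjusted method:
@Override
protected void configure(HttpSecurity http) throws Exception {
    http.authorizeRequests()
        .antMatchers("/css/**", "/js/**", "/fonts/**", "/index").permitAll()
        // "/storage/**" covers /storage/files and /storage/files/file, unlike "/storage*"
        .antMatchers("/storage/**").permitAll();
}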