/**
 * Upload a file which is in the assets bucket.
 *
 * @param fileName key under which the file is stored in the bucket
 * @param file the file to upload
 * @param contentType content type for the file; may be null to skip setting it
 * @return true when the upload succeeded, false otherwise
 */
public static boolean uploadFile(String fileName, File file, String contentType) {
    try {
        if (S3Module.amazonS3 != null) {
            String bucket = S3Module.s3Bucket;
            ObjectMetadata metaData = new ObjectMetadata();
            if (contentType != null) {
                metaData.setContentType(contentType);
            }
            PutObjectRequest putObj = new PutObjectRequest(bucket, fileName, file);
            putObj.setMetadata(metaData);
            // Assets are served publicly.
            putObj.withCannedAcl(CannedAccessControlList.PublicRead);
            S3Module.amazonS3.putObject(putObj);
            return true;
        } else {
            Logger.error("Could not save because amazonS3 was null");
            return false;
        }
    } catch (Exception e) {
        // Pass the exception itself so the stack trace is preserved;
        // getMessage() alone can be null and loses all context.
        Logger.error("S3 Upload -" + e.getMessage(), e);
        return false;
    }
}
public static void uploadToS3() { // upload to s3 bucket AWSCredentials awsCredentials = SkillConfig.getAWSCredentials(); AmazonS3Client s3Client = awsCredentials != null ? new AmazonS3Client(awsCredentials) : new AmazonS3Client(); File folder = new File("c:/temp/morse/" + DOT + "/mp3/"); File[] listOfFiles = folder.listFiles(); for (File file : listOfFiles) { if (file.isFile()) { if (!s3Client.doesObjectExist("morseskill", DOT + "/" + file.getName())) { PutObjectRequest s3Put = new PutObjectRequest("morseskill", DOT + "/" + file.getName(), file).withCannedAcl(CannedAccessControlList.PublicRead); s3Client.putObject(s3Put); System.out.println("Upload complete: " + file.getName()); } else { System.out.println("Skip as " + file.getName() + " already exists."); } } } }
/** * The method creates a short URL code for the given url * * @param url * @return */ public ShortUrl createShortUrl(String url, String code) { logger.info("storing the url {} in the bucket {}", url, this.bucket); // Create the link for the short code if (code == null) { code = getObjectCode(); } String loadFile = redirectFile.replace("REPLACE", url); byte[] fileContentBytes = loadFile.getBytes(StandardCharsets.UTF_8); InputStream fileInputStream = new ByteArrayInputStream(fileContentBytes); ObjectMetadata metadata = new ObjectMetadata(); metadata.setContentType("text/html"); metadata.addUserMetadata("url", url); metadata.setContentLength(fileContentBytes.length); PutObjectRequest putObjectRequest = new PutObjectRequest(this.bucket, code, fileInputStream, metadata) .withCannedAcl(CannedAccessControlList.PublicRead); this.s3Client.putObject(putObjectRequest); createDummyRecord(url, code); return new ShortUrl(url, code); }
/**
 * Applies an ACL to a bucket. Exactly one of a full AccessControlList or a
 * CannedAccessControlList must be present on the request.
 */
@Override
public void setBucketAcl(SetBucketAclRequest setBucketAclRequest)
        throws SdkClientException, AmazonServiceException {
    setBucketAclRequest = beforeClientExecution(setBucketAclRequest);
    String bucketName = setBucketAclRequest.getBucketName();
    rejectNull(bucketName, "The bucket name parameter must be specified when setting a bucket's ACL");
    AccessControlList acl = setBucketAclRequest.getAcl();
    CannedAccessControlList cannedAcl = setBucketAclRequest.getCannedAcl();
    // Neither ACL form supplied — the request is unusable.
    if (acl == null && cannedAcl == null) {
        throw new IllegalArgumentException(
                "The ACL parameter must be specified when setting a bucket's ACL");
    }
    // Both supplied — ambiguous, reject rather than pick one.
    if (acl != null && cannedAcl != null) {
        throw new IllegalArgumentException(
                "Only one of the acl and cannedAcl parameter can be specified, not both.");
    }
    // Dispatch to the matching setAcl overload; key/versionId are null for bucket ACLs.
    if (acl != null) {
        setAcl(bucketName, null, null, acl, false, setBucketAclRequest);
    } else {
        setAcl(bucketName, null, null, cannedAcl, false, setBucketAclRequest);
    }
}
public String uploadImageToS3(final BufferedImage image, final String fileKey) throws IOException { ByteArrayInputStream bis = null; final ByteArrayOutputStream bos = new ByteArrayOutputStream(); try { ImageIO.write(image, "png", bos); final byte[] bImageData = bos.toByteArray(); bis = new ByteArrayInputStream(bImageData); // upload to s3 bucket final PutObjectRequest s3Put = new PutObjectRequest(bucket, fileKey, bis, null).withCannedAcl(CannedAccessControlList.PublicRead); s3Client.putObject(s3Put); return getS3Url(fileKey); } finally { try { bos.close(); if (bis != null) bis.close(); } catch(IOException e) { logger.severe("Error while closing stream for writing an image for " + fileKey + " caused by " + e.getMessage()); } } }
protected static String uploadFileToS3(BufferedImage image, String word, Boolean codeOnly) throws IOException { ByteArrayInputStream bis = null; ByteArrayOutputStream bos = new ByteArrayOutputStream(); try { String bucket = SkillConfig.getS3BucketName(); String fileKey = getFileKey(word, codeOnly); ImageIO.write(image, "png", bos); byte[] bImageData = bos.toByteArray(); bis = new ByteArrayInputStream(bImageData); // upload to s3 bucket AWSCredentials awsCredentials = SkillConfig.getAWSCredentials(); AmazonS3Client s3Client = awsCredentials != null ? new AmazonS3Client(awsCredentials) : new AmazonS3Client(); PutObjectRequest s3Put = new PutObjectRequest(bucket, fileKey, bis, null).withCannedAcl(CannedAccessControlList.PublicRead); s3Client.putObject(s3Put); return getS3Url(word, codeOnly); } finally { bos.close(); if (bis != null) bis.close(); } }
@Override public PutObjectResult uploadObject(final String bucketName, final String fileName, final InputStream inputStream, final CannedAccessControlList cannedAcl) throws AmazonClientException, AmazonServiceException, IOException { LOGGER.info("uploadObject invoked, bucketName: {} , fileName: {}, cannedAccessControlList: {}", bucketName, fileName, cannedAcl); File tempFile = null; PutObjectRequest putObjectRequest = null; PutObjectResult uploadResult = null; try { // Create temporary file from stream to avoid 'out of memory' exception tempFile = AWSUtil.createTempFileFromStream(inputStream); putObjectRequest = new PutObjectRequest(bucketName, fileName, tempFile).withCannedAcl(cannedAcl); uploadResult = uploadObject(putObjectRequest); } finally { AWSUtil.deleteTempFile(tempFile); // Delete the temporary file once uploaded } return uploadResult; }
@Override public PutObjectResult createDirectory(final String bucketName, final String dirName, final boolean isPublicAccessible) throws AmazonClientException, AmazonServiceException { LOGGER.info("createDirectory invoked, bucketName: {}, dirName: {} and isPublicAccessible: {}", bucketName, dirName, isPublicAccessible); final ObjectMetadata metadata = new ObjectMetadata(); metadata.setContentLength(0); // Create empty content,since creating empty folder needs an empty content final InputStream emptyContent = new ByteArrayInputStream(new byte[0]); // Create a PutObjectRequest passing the directory name suffixed by '/' final PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, dirName + AWSUtilConstants.SEPARATOR, emptyContent, metadata); if(isPublicAccessible){ putObjectRequest.setCannedAcl(CannedAccessControlList.PublicRead); } return s3client.putObject(putObjectRequest); }
/**
 * Uploads the file to the "car-classifieds" bucket via the TransferManager and
 * forwards progress/lifecycle events to the supplied FileProgressListener.
 *
 * @param transferManager manager performing the (possibly multipart) upload
 * @param file the file to upload; its name becomes the object key
 * @param fileProgressListener callback for start/progress/complete/failure events
 * @throws InterruptedException declared but not visibly thrown here — presumably
 *         for callers that wait on the returned Upload; confirm against callers
 */
public static void uploadToS3(TransferManager transferManager, final File file,
        final FileProgressListener fileProgressListener) throws InterruptedException {
    PutObjectRequest putObjectRequest = new PutObjectRequest("car-classifieds", file.getName(), file)
            .withCannedAcl(CannedAccessControlList.PublicRead);
    final Upload upload = transferManager.upload(putObjectRequest);
    upload.addProgressListener(new ProgressListener() {
        @Override
        public void progressChanged(ProgressEvent progressEvent) {
            // Byte count is forwarded on every event, before lifecycle dispatch.
            fileProgressListener.onProgressChanged(progressEvent.getBytesTransferred());
            // NOTE(review): getEventCode()-based dispatch looks like the legacy
            // ProgressEvent API — confirm the SDK version before migrating.
            if (progressEvent.getEventCode() == ProgressEvent.COMPLETED_EVENT_CODE) {
                fileProgressListener.onCompleteUpload();
            }
            if (progressEvent.getEventCode() == com.amazonaws.event.ProgressEvent.STARTED_EVENT_CODE) {
                fileProgressListener.onStartUpload();
            }
            if (progressEvent.getEventCode() == com.amazonaws.event.ProgressEvent.FAILED_EVENT_CODE) {
                fileProgressListener.onFailedUpload();
            }
        }
    });
}
/**
 * CFML function: sets a canned ACL on a single S3 object.
 * Expects named params: bucket, key, acl.
 */
public cfData execute( cfSession _session, cfArgStructData argStruct ) throws cfmRunTimeException{
    AmazonKey credentials = getAmazonKey(_session, argStruct);
    AmazonS3 client = getAmazonS3(credentials);

    String bucket = getNamedStringParam(argStruct, "bucket", null );
    String objectKey = getNamedStringParam(argStruct, "key", null );
    // S3 object keys never start with '/'; strip a leading slash if present.
    if ( objectKey != null && objectKey.charAt( 0 ) == '/' ) {
        objectKey = objectKey.substring(1);
    }

    CannedAccessControlList cannedAcl =
            credentials.getAmazonCannedAcl( getNamedStringParam(argStruct, "acl", null ) );

    try {
        client.setObjectAcl(bucket, objectKey, cannedAcl);
    } catch (Exception e) {
        throwException(_session, "AmazonS3: " + e.getMessage() );
    }

    return cfBooleanData.TRUE;
}
/**
 * CFML function: sets a canned ACL on an entire S3 bucket.
 * Expects named params: bucket, acl.
 */
public cfData execute( cfSession _session, cfArgStructData argStruct ) throws cfmRunTimeException{
    AmazonKey credentials = getAmazonKey(_session, argStruct);
    AmazonS3 client = getAmazonS3(credentials);

    String bucket = getNamedStringParam(argStruct, "bucket", null );
    CannedAccessControlList cannedAcl =
            credentials.getAmazonCannedAcl( getNamedStringParam(argStruct, "acl", null ) );

    try {
        client.setBucketAcl(bucket, cannedAcl);
    } catch (Exception e) {
        throwException(_session, "AmazonS3: " + e.getMessage() );
    }

    return cfBooleanData.TRUE;
}
/**
 * Maps an ACL name to the corresponding canned access control list.
 * Accepted values (case-insensitive, with or without hyphens):
 * private | public-read | public-read-write | authenticated-read |
 * bucket-owner-read | bucket-owner-full-control | log-delivery-write
 *
 * @param acl ACL name; null or any unrecognized value falls back to Private
 * @return the matching CannedAccessControlList, never null
 */
public CannedAccessControlList getAmazonCannedAcl(String acl) {
    // Null previously caused a NullPointerException; treat it like an
    // unrecognized value and fall back to the safest (private) ACL.
    if (acl == null) {
        return CannedAccessControlList.Private;
    }
    // Locale.ROOT avoids locale-dependent lowercasing (e.g. Turkish dotless i).
    switch (acl.toLowerCase(java.util.Locale.ROOT)) {
        case "private":
            return CannedAccessControlList.Private;
        case "public-read":
        case "publicread":
            return CannedAccessControlList.PublicRead;
        case "public-read-write":
        case "publicreadwrite":
            return CannedAccessControlList.PublicReadWrite;
        case "authenticated-read":
        case "authenticatedread":
            return CannedAccessControlList.AuthenticatedRead;
        case "bucket-owner-read":
        case "bucketownerread":
            return CannedAccessControlList.BucketOwnerRead;
        case "bucket-owner-full-control":
        case "bucketownerfullcontrol":
            return CannedAccessControlList.BucketOwnerFullControl;
        case "log-delivery-write":
        case "logdeliverywrite":
            return CannedAccessControlList.LogDeliveryWrite;
        default:
            return CannedAccessControlList.Private;
    }
}
/**
 * Uploads a blob, makes it publicly readable (via object ACL, or bucket ACL on
 * stores without per-blob access control), then fetches it anonymously over
 * HTTP and verifies the round-tripped content.
 */
@Test
public void testHttpClient() throws Exception {
    String blobName = "blob-name";
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(BYTE_SOURCE.size());
    client.putObject(containerName, blobName, BYTE_SOURCE.openStream(), metadata);
    // Some backends cannot set per-object ACLs; fall back to the bucket ACL.
    if (Quirks.NO_BLOB_ACCESS_CONTROL.contains(blobStoreType)) {
        client.setBucketAcl(containerName, CannedAccessControlList.PublicRead);
    } else {
        client.setObjectAcl(containerName, blobName, CannedAccessControlList.PublicRead);
    }
    HttpClient httpClient = context.utils().http();
    // Build the public URL against the proxy's secure port and service path.
    URI uri = new URI(s3Endpoint.getScheme(), s3Endpoint.getUserInfo(), s3Endpoint.getHost(),
            s3Proxy.getSecurePort(), servicePath + "/" + containerName + "/" + blobName,
            /*query=*/ null, /*fragment=*/ null);
    try (InputStream actual = httpClient.get(uri);
            InputStream expected = BYTE_SOURCE.openStream()) {
        assertThat(actual).hasContentEqualTo(expected);
    }
}
/**
 * Makes the given S3 object publicly readable via a canned ACL.
 *
 * @param bucketName bucket containing the object
 * @param keyName key of the object to expose
 * @return true when the ACL was applied; false when an AWS error occurred
 */
@Override
public boolean publicEntity(String bucketName, String keyName) {
    LOG.info("Sets the CannedAccessControlList for the specified object " + keyName
            + " in Amazon S3 using one of the pre-configured CannedAccessControlLists");
    try {
        amazonS3Client.setObjectAcl(bucketName, keyName, CannedAccessControlList.PublicRead);
        return true;
    } catch (AmazonServiceException serviceFailure) {
        // Service-side rejection (auth, missing object, ...) — log and report failure.
        LOG.warn(serviceFailure.getMessage(), serviceFailure);
    } catch (AmazonClientException clientFailure) {
        // Client-side/transport problem — log and report failure.
        LOG.warn(clientFailure.getMessage(), clientFailure);
    }
    return false;
}
/**
 * Stores the given text as a public-read text/html object in the static bucket.
 *
 * @param key the S3 object key to store the content under
 * @param contents the text to upload, encoded as UTF-8
 * @return true on success (encoding can no longer fail)
 */
private boolean uploadTextAsFile(String key, String contents){
    // StandardCharsets.UTF_8 cannot throw UnsupportedEncodingException,
    // unlike the string-based getBytes("UTF-8") overload, so the dead
    // catch/return-false path is gone.
    byte[] bytes = contents.getBytes(java.nio.charset.StandardCharsets.UTF_8);
    InputStream is = new ByteArrayInputStream(bytes);

    String bucket = "s3.staticvoidgames.com";

    ObjectMetadata meta = new ObjectMetadata();
    meta.setContentLength(bytes.length);
    meta.setContentType("text/html");

    String awsAccessKey = env.getProperty("aws.accessKey");
    String awsSecretKey = env.getProperty("aws.secretKey");
    AmazonS3 s3 = new AmazonS3Client(new BasicAWSCredentials(awsAccessKey, awsSecretKey));
    s3.putObject(bucket, key, is, meta);
    // Content is served from the website, so it must be publicly readable.
    s3.setObjectAcl(bucket, key, CannedAccessControlList.PublicRead);

    return true;
}
/**
 * Create an amazon bucket in the specified region.
 *
 * @param bucket - The s3 bucket name
 * @param region - The S3 region the bucket should be created in
 * @param cannedACL - Optional canned ACL applied to the bucket; skipped when null
 * @param accessList - Optional access control list settings for the bucket; skipped when null
 */
public void createBucket(final String bucket, final Region region,
        final CannedAccessControlList cannedACL, final AccessControlList accessList){
    final CreateBucketRequest request = new CreateBucketRequest(bucket, region);
    // Both ACL forms are optional and independent; each is attached only when provided.
    if(cannedACL!=null){
        request.withCannedAcl(cannedACL);
    }
    if(accessList!=null){
        request.withAccessControlList(accessList);
    }
    this.client.createBucket(request);
}
/***
 * Initialize S3 buckets from the application's AWS/S3 configuration, creating
 * the default drive plus every configured bucket with owner-full-control ACL.
 * Errors are logged but never propagated; the client is always shut down.
 */
@Override
public void run(AppConfiguration configuration, Environment environment)throws Exception {
    // Nothing to do unless an S3 section is actually configured.
    boolean hasS3Config = configuration != null
            && configuration.getAws() != null
            && configuration.getAws().getS3() != null;
    if (!hasS3Config) {
        return;
    }
    final AmazonS3Client client = new AmazonS3Client();
    try {
        LOGGER.info("Executing configured s3 bundle");
        final S3Config s3Config = configuration.getAws().getS3();
        final S3Admin admin = new S3Admin(client);
        admin.createBucket(s3Config.getDefaultDrive(), CannedAccessControlList.BucketOwnerFullControl);
        for (String bucket : s3Config.getBuckets()) {
            admin.createBucket(bucket, CannedAccessControlList.BucketOwnerFullControl);
        }
        LOGGER.info("Completed s3 bundle execution");
    } catch (Exception ex) {
        // Bucket bootstrap is best-effort: log and continue startup.
        LOGGER.error(ex.getMessage(), ex);
    } finally {
        client.shutdown();
    }
}
/**
 * Stores an uploaded file under files/&lt;userId&gt;_&lt;timestamp&gt;_&lt;originalName&gt;
 * and returns its public S3 URL, or null when reading the upload fails.
 */
public String storeFile(CommonsMultipartFile file, String userId, String postTimestamp) {
    try {
        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.setContentType("application/octet-stream");
        objectMetadata.addUserMetadata("originalfilename", file.getOriginalFilename());
        // Colons are not usable in the key; replace them before composing the path.
        String sanitizedTimestamp = postTimestamp.replace(':', '_');
        String path = "files/" + userId + "_" + sanitizedTimestamp + "_" + file.getOriginalFilename();
        s3client.putObject(new PutObjectRequest(BUCKET_NAME, path, file.getInputStream(), objectMetadata));
        // Stored files are served directly from S3, so make them public.
        s3client.setObjectAcl(BUCKET_NAME, path, CannedAccessControlList.PublicRead);
        return "http://s3.amazonaws.com/" + BUCKET_NAME + "/" + path;
    } catch (IOException e) {
        return null;
    }
}
/**
 * Stores an uploaded picture under pictures/&lt;userId&gt;_&lt;timestamp&gt;_&lt;originalName&gt;
 * and returns its public S3 URL, or null when reading the upload fails.
 */
public String storePicture(CommonsMultipartFile file, String userId, String postTimestamp) {
    try {
        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.setContentType("application/octet-stream");
        objectMetadata.addUserMetadata("originalfilename", file.getOriginalFilename());
        // Colons are not usable in the key; replace them before composing the path.
        String sanitizedTimestamp = postTimestamp.replace(':', '_');
        String path = "pictures/" + userId + "_" + sanitizedTimestamp + "_" + file.getOriginalFilename();
        s3client.putObject(new PutObjectRequest(BUCKET_NAME, path, file.getInputStream(), objectMetadata));
        // Pictures are served directly from S3, so make them public.
        s3client.setObjectAcl(BUCKET_NAME, path, CannedAccessControlList.PublicRead);
        return "http://s3.amazonaws.com/" + BUCKET_NAME + "/" + path;
    } catch (IOException e) {
        return null;
    }
}
/**
 * Builds a named resolver backed by an S3Repository configured with the
 * given credentials, overwrite policy, region, optional ACL, encryption
 * flag and storage class.
 */
public S3Resolver(
    String name,
    AWSCredentialsProvider credentialsProvider,
    boolean overwrite,
    Region region,
    Optional<CannedAccessControlList> acl,
    boolean serverSideEncryption,
    StorageClass storageClass
) {
    setName(name);
    // All S3-specific settings are delegated to the repository.
    setRepository(new S3Repository(
        credentialsProvider,
        overwrite,
        region,
        acl,
        serverSideEncryption,
        storageClass
    ));
}
/**
 * Convenience constructor taking a nullable ACL directly; wraps it in an
 * Optional and builds a standard S3 client for the given region before
 * delegating to the main constructor.
 */
public S3Repository(
    AWSCredentialsProvider provider,
    boolean overwrite,
    Region region,
    CannedAccessControlList acl,
    boolean serverSideEncryption,
    StorageClass storageClass
) {
    this(
        AmazonS3Client.builder().standard()
            .withCredentials(provider)
            .withRegion(region.toString())
            .build(),
        overwrite,
        // A null ACL simply means "no canned ACL".
        Optional.ofNullable(acl),
        serverSideEncryption,
        storageClass
    );
}
/**
 * Constructor taking an Optional ACL; builds a standard S3 client for the
 * given region and delegates to the client-based constructor.
 */
public S3Repository(
    AWSCredentialsProvider provider,
    boolean overwrite,
    Region region,
    Optional<CannedAccessControlList> acl,
    boolean serverSideEncryption,
    StorageClass storageClass
) {
    this(
        AmazonS3Client.builder().standard()
            .withCredentials(provider)
            .withRegion(region.toString())
            .build(),
        overwrite,
        acl,
        serverSideEncryption,
        storageClass
    );
}
@Override public void save(String path, String fileName, byte[] bytes) { byte[] md5 = DigestUtils.md5(bytes); InputStream is = new ByteArrayInputStream(bytes); ObjectMetadata metadata = new ObjectMetadata(); metadata.setContentLength(bytes.length); //setting max-age to 15 days metadata.setCacheControl("max-age=1296000"); metadata.setContentMD5(new String(Base64.encodeBase64(md5))); PutObjectRequest request = new PutObjectRequest(bucketName, subPath + path + fileName, is, metadata); request.setCannedAcl(CannedAccessControlList.PublicRead); PutObjectResult result = s3Client.putObject(request); logger.debug("Etag:" + result.getETag() + "-->" + result); }
/**
 * Stores the stream as a public-read object in the configured S3 bucket.
 * AWS service errors are logged and swallowed (best-effort upload).
 *
 * @param filename object key to store the content under
 * @param is the content to upload
 * @param metadata metadata to attach to the object
 */
public static void storeFileInBucket(String filename, InputStream is, ObjectMetadata metadata) {
    String bucketName = properties.getProperty(S3_BUCKET_NAME);
    try {
        LOG.info("Storing file {} in S3 bucket {}.", filename, bucketName);
        S3.putObject(bucketName, filename, is, metadata);
        S3.setObjectAcl(bucketName, filename, CannedAccessControlList.PublicRead);
    } catch (AmazonServiceException e) {
        // Message previously claimed we were "fetching" a file — this method stores one.
        LOG.error("Exception occurred when storing file {} in S3 bucket {}.", filename, bucketName, e);
    }
}
/**
 * Constructs canned acl from string.
 * Null or empty input yields the private ACL; unrecognized names are rejected.
 *
 * @param cannedACL ACL name matched case-insensitively against the enum values
 * @return the matching canned ACL
 * @throws BlobStoreException when the name matches no known canned ACL
 */
public static CannedAccessControlList initCannedACL(String cannedACL) {
    // Absent configuration defaults to a private ACL.
    if (cannedACL == null || cannedACL.isEmpty()) {
        return CannedAccessControlList.Private;
    }
    for (CannedAccessControlList candidate : CannedAccessControlList.values()) {
        if (candidate.toString().equalsIgnoreCase(cannedACL)) {
            return candidate;
        }
    }
    throw new BlobStoreException("cannedACL is not valid: [" + cannedACL + "]");
}
/**
 * Demonstrates that buckets/objects created by a parent RGW user and by a
 * full-access S3 subuser are visible to each other (shared namespace).
 */
@Test
@Ignore("Not a test")
public void subuserWithS3CredentialIncorporated() throws Exception {
    testWithUserAndS3(
        (user, s3) -> {
            createSomeObjects(s3);
            // Create a full-access S3 subuser under the parent user.
            List<SubUser> subUser =
                RGW_ADMIN.createSubUser(
                    user.getUserId(), "QQQ", ImmutableMap.of("key-type", "s3", "access", "full"));
            User userInfo = RGW_ADMIN.getUserInfo(user.getUserId()).get();
            // Locate the S3 credential pair that belongs to the new subuser.
            S3Credential subUserKey =
                userInfo
                    .getS3Credentials()
                    .stream()
                    .filter(v -> v.getUserId().equals(subUser.get(0).getId()))
                    .findFirst()
                    .get();
            AmazonS3 subUserS3 = createS3(subUserKey.getAccessKey(), subUserKey.getSecretKey());
            createSomeObjects(subUserS3);

            // The S3 bucket created by parent user and created by child subuser are incorporated.
            assertEquals(s3.listBuckets().size(), subUserS3.listBuckets().size());
            for (String bucketName :
                s3.listBuckets().stream().map(v -> v.getName()).collect(Collectors.toList())) {
                // Both principals see identical object listings per bucket.
                assertEquals(
                    s3.listObjects(bucketName).getObjectSummaries().toString(),
                    subUserS3.listObjects(bucketName).getObjectSummaries().toString());
                // The subuser can also read and modify bucket ACLs.
                subUserS3.getBucketAcl(bucketName);
                subUserS3.setBucketAcl(bucketName, CannedAccessControlList.AuthenticatedRead);
            }
        });
}
/**
 * Uploads the captured stream publicly readable to the configured bucket/key,
 * attaching content type (when known) and length metadata.
 */
@Override
public void run() {
    ObjectMetadata metadata = new ObjectMetadata();
    // Content type is optional; length is always known and required for streams.
    if (p_content_type != null) {
        metadata.setContentType(p_content_type);
    }
    metadata.setContentLength(p_size);

    PutObjectRequest request = new PutObjectRequest(p_bucket_name, p_s3_key, p_file_stream, metadata);
    request.setCannedAcl(CannedAccessControlList.PublicRead);
    PutObjectResult res = s3Client.putObject(request);
}
/**
 * Same as {@link #setObjectAcl(String, String, String, CannedAccessControlList)}
 * but allows specifying a request metric collector.
 */
public void setObjectAcl(String bucketName, String key, String versionId,
        CannedAccessControlList acl, RequestMetricCollector requestMetricCollector) {
    // Delegates to the request-object overload, attaching the metric collector.
    setObjectAcl(new SetObjectAclRequest(bucketName, key, versionId, acl)
            .<SetObjectAclRequest> withRequestMetricCollector(requestMetricCollector));
}
/**
 * Same as {@link #setBucketAcl(String, CannedAccessControlList)}
 * but allows specifying a request metric collector.
 */
public void setBucketAcl(String bucketName, CannedAccessControlList cannedAcl,
        RequestMetricCollector requestMetricCollector)
        throws SdkClientException, AmazonServiceException {
    // Delegates to the request-object overload, attaching the metric collector.
    SetBucketAclRequest request = new SetBucketAclRequest(bucketName, cannedAcl)
            .withRequestMetricCollector(requestMetricCollector);
    setBucketAcl(request);
}
void saveImageToAWS(String fileName, File file, String sessionId, String index) { PutObjectRequest request = new PutObjectRequest(s3Name, fileName, file); request.setCannedAcl(CannedAccessControlList.PublicRead); getS3().putObject(request); // Save the information to DB storeNewImageOnDynamo(sessionId, index, s3Endpoint + fileName); }
/**
 * Ensures an MP3 of the given text in Morse code exists in the bucket and
 * returns its public URL. Encoding (WAV via MorseUtils, then MP3 via ffmpeg)
 * only happens when the object is not already cached in the bucket; local
 * temp files are deleted afterwards on a best-effort basis.
 *
 * @param text text to encode; spaces become underscores in the filename
 * @param wpm words-per-minute speed encoded into the filename and audio
 * @param wpmFarnsworth Farnsworth speed encoded into the filename and audio
 * @return public URL of the MP3 in the bucket
 */
public String uploadMorseToS3(final String text, final int wpm, final int wpmFarnsworth)
        throws IOException, InterruptedException, UnsupportedAudioFileException, LineUnavailableException {
    // generate filenames — the key encodes text plus both speeds so each
    // distinct rendering gets its own cached object
    final String filename = URLEncoder.encode(text.replace(" ", "_"), "UTF-8") + "-"
            + String.valueOf(wpm) + "-" + String.valueOf(wpmFarnsworth);
    final String mp3Filename = filename + ".mp3";
    final String filenameWav = filename + ".wav";
    final String s3Mp3FilePath = mp3Filename;
    // check if this code was already encoded and is available in the bucket
    if (!s3Client.doesObjectExist(bucket, mp3Filename)) {
        logger.info(String.format("%s not found in S3 bucket. Start encoding code now.", mp3Filename));
        // convert the code to phonetic version as wave
        final File wavFile = MorseUtils.encodeMorseToWave(text, filenameWav, wpm, wpmFarnsworth);
        // convert the wave file to mp3 leveraging ffmpeg
        final File mp3File = Mp3Utils.convertWaveToMp3(wavFile, mp3Filename);
        // upload mp3 to S3 bucket
        final PutObjectRequest s3Put = new PutObjectRequest(bucket, s3Mp3FilePath, mp3File)
                .withCannedAcl(CannedAccessControlList.PublicRead);
        s3Client.putObject(s3Put);
        try {
            // delete files from local disk — best-effort; a failure is only logged
            if (!mp3File.delete() || !wavFile.delete()) {
                logger.warning("Could not delete either one or both of the temporary audio files.");
            }
        } catch(SecurityException ex) {
            logger.severe("Could not delete files due to " + ex.getMessage());
        }
    } else {
        logger.info(String.format("%s already exists in S3 bucket thus encoding is skipped.", s3Mp3FilePath));
    }
    // return public url of mp3 in bucket
    return bucketUrl + mp3Filename;
}
/**
 * Validates and uploads the stream to the CDN bucket as "fileName.sufix".
 * isPublic defaults to true when null and selects PublicRead vs
 * AuthenticatedRead ACL. The input stream is always closed on the upload path.
 *
 * @param is content to upload; rejected (INVALID_PARAMETERS) when null
 * @param fileName target name without extension; rejected when null
 * @param sufix file extension, validated together with the stream
 * @param isPublic null means public
 * @return the stored object key ("fileName.sufix")
 * @throws Exception BusinessException on validation or AWS/IO failure
 */
public String upload(InputStream is, String fileName, String sufix, Boolean isPublic) throws Exception {
    // NOTE(review): validateFile runs before the null checks below — presumably
    // it tolerates a null stream; confirm against its implementation.
    validateFile(is, sufix);
    if (isPublic == null) {
        isPublic = Boolean.TRUE;
    }
    if (is != null && fileName != null) {
        try {
            // Buffer fully so the metadata helper can compute length/type from bytes.
            byte[] bytes = IOUtils.toByteArray(is);
            s3Client.putObject(
                new PutObjectRequest(
                    cdnConfig.getName(),
                    fileName + "." + sufix,
                    new ByteArrayInputStream(bytes),
                    S3ObjectMetadata.getObjectMetadata(bytes)
                ).withCannedAcl(isPublic ? CannedAccessControlList.PublicRead : CannedAccessControlList.AuthenticatedRead)
            );
            return fileName + "." + sufix;
        } catch (AmazonServiceException | IOException e) {
            // AWS/IO failures surface as a credentials/bucket business error.
            throw new BusinessException(Validations.INVALID_S3_BUCKET_CREDENTIALS.getCode());
        } finally {
            is.close();
        }
    } else {
        throw new BusinessException(Validations.INVALID_PARAMETERS.getCode());
    }
}
public void uploadFileToS3(final File file, final String bucket, final String path, final String region, final String roleArn) { // upload mp3 to S3 bucket final PutObjectRequest s3Put = new PutObjectRequest(bucket, path, file).withCannedAcl(CannedAccessControlList.PublicRead); getS3Client(region, roleArn).putObject(s3Put); if (!file.delete()) { logger.warning("Could not delete mp3 temporary audio file."); } }
/**
 * Sets public read permissions on content within an S3 bucket.
 *
 * <p>Web content served from an S3 bucket must have public read permissions.
 *
 * @param bucketName the bucket to apply the permissions to.
 * @param prefix prefix within the bucket, beneath which to apply the permissions.
 * @param logger a CloudwatchLogs logger.
 */
public static void setPublicReadPermissionsOnBucket(String bucketName, Optional<String> prefix,
        LambdaLogger logger) {
    // Ensure newly uploaded content has public read permission
    ListObjectsRequest listObjectsRequest;
    if (prefix.isPresent()) {
        logger.log("Setting public read permission on bucket: " + bucketName + " and prefix: "
                + prefix.get());
        listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName).withPrefix(
                prefix.get());
    } else {
        logger.log("Setting public read permission on bucket: " + bucketName);
        listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName);
    }
    ObjectListing objectListing;
    AmazonS3 client = TransferManagerBuilder.defaultTransferManager().getAmazonS3Client();
    // Listings are paginated: advance the marker and loop until no page is truncated.
    do {
        objectListing = client.listObjects(listObjectsRequest);
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            logger.log("Setting permissions for S3 object: " + objectSummary.getKey());
            client.setObjectAcl(bucketName, objectSummary.getKey(), CannedAccessControlList.PublicRead);
        }
        listObjectsRequest.setMarker(objectListing.getNextMarker());
    } while (objectListing.isTruncated());
    logger.log("Finished setting public read permissions");
}
/**
 * Adds gzip content-encoding metadata to S3 objects.
 *
 * <p>Adds gzip content-encoding metadata to S3 objects. All objects
 *    beneath the specified prefix (i.e. folder) will have the
 *    metadata added. When the bucket serves objects it will then
 *    add a suitable Content-Encoding header.
 *
 * @param bucketName the bucket to apply the metadata to.
 * @param prefix prefix within the bucket, beneath which to apply the metadata.
 * @param logger a CloudwatchLogs logger.
 */
public static void addGzipContentEncodingMetadata(String bucketName, Optional<String> prefix,
        LambdaLogger logger) {
    // To add new metadata, we must copy each object to itself.
    ListObjectsRequest listObjectsRequest;
    if (prefix.isPresent()) {
        logger.log("Setting gzip content encoding metadata on bucket: " + bucketName
                + " and prefix: " + prefix.get());
        listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName).withPrefix(
                prefix.get());
    } else {
        logger.log("Setting gzip content encoding metadata on bucket: " + bucketName);
        listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName);
    }
    ObjectListing objectListing;
    AmazonS3 client = TransferManagerBuilder.defaultTransferManager().getAmazonS3Client();
    // Listings are paginated: advance the marker and loop until no page is truncated.
    do {
        objectListing = client.listObjects(listObjectsRequest);
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            String key = objectSummary.getKey();
            logger.log("Setting metadata for S3 object: " + key);
            // We must specify ALL metadata - not just the one we're adding.
            ObjectMetadata objectMetadata = client.getObjectMetadata(bucketName, key);
            objectMetadata.setContentEncoding("gzip");
            // Copy-to-self with new metadata; the ACL must be restated because a
            // copy does not carry over the source object's ACL.
            CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucketName, key,
                    bucketName, key).withNewObjectMetadata(objectMetadata).withCannedAccessControlList(
                    CannedAccessControlList.PublicRead);
            client.copyObject(copyObjectRequest);
            logger.log("Set metadata for S3 object: " + key);
        }
        listObjectsRequest.setMarker(objectListing.getNextMarker());
    } while (objectListing.isTruncated());
    logger.log("Set gzip content encoding metadata on bucket");
}
/**
 * Adds cache-control header to S3 objects.
 *
 * <p>Adds cache-control header to S3 objects. All objects
 *    beneath the specified prefix (i.e. folder), and with the
 *    specified extension will have the header added. When the
 *    bucket serves objects it will then add a suitable
 *    Cache-Control header.
 *
 * @param headerValue value of the cache-control header
 * @param bucketName the bucket to apply the header to.
 * @param prefix prefix within the bucket, beneath which to apply the header.
 * @param extension file extension to apply header to
 * @param logger a CloudwatchLogs logger.
 */
public static void addCacheControlHeader(String headerValue, String bucketName,
        Optional<String> prefix, String extension, LambdaLogger logger) {
    // To add new metadata, we must copy each object to itself.
    ListObjectsRequest listObjectsRequest;
    if (prefix.isPresent()) {
        logger.log("Setting cache-control metadata: " + headerValue + ", on bucket: " + bucketName
                + " and prefix: " + prefix.get() + " and extension: " + extension);
        listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName).withPrefix(
                prefix.get());
    } else {
        logger.log("Setting cache-control metadata: " + headerValue + ", on bucket: " + bucketName
                + " and extension: " + extension);
        listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName);
    }
    ObjectListing objectListing;
    AmazonS3 client = TransferManagerBuilder.defaultTransferManager().getAmazonS3Client();
    // Listings are paginated: advance the marker and loop until no page is truncated.
    do {
        objectListing = client.listObjects(listObjectsRequest);
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            String key = objectSummary.getKey();
            // Extension filter is applied per key, not per listing request.
            if (!key.endsWith(extension)) {
                continue;
            }
            logger.log("Setting metadata for S3 object: " + key);
            // We must specify ALL metadata - not just the one we're adding.
            ObjectMetadata objectMetadata = client.getObjectMetadata(bucketName, key);
            objectMetadata.setCacheControl(headerValue);
            // Copy-to-self with new metadata; the ACL must be restated because a
            // copy does not carry over the source object's ACL.
            CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucketName, key,
                    bucketName, key).withNewObjectMetadata(objectMetadata).withCannedAccessControlList(
                    CannedAccessControlList.PublicRead);
            client.copyObject(copyObjectRequest);
            logger.log("Set metadata for S3 object: " + key);
        }
        listObjectsRequest.setMarker(objectListing.getNextMarker());
    } while (objectListing.isTruncated());
    logger.log("Set cache-control metadata on bucket");
}
private void copyJsonDataToS3(String keyName, String jsonToCopy) throws Exception { logger.log("About to copy cached json data to S3"); try { logger.log("Uploading json data to S3 bucket: " + websiteBucketName + " and key: " + keyName + ".json"); byte[] jsonAsBytes = jsonToCopy.getBytes(StandardCharsets.UTF_8); ByteArrayInputStream jsonAsStream = new ByteArrayInputStream(jsonAsBytes); ObjectMetadata metadata = new ObjectMetadata(); metadata.setContentLength(jsonAsBytes.length); metadata.setContentType("application/json"); // Direct caches not to satisfy future requests with this data without // revalidation. if (keyName.contains("famousplayers")) { // Famousplayers list is good for a year metadata.setCacheControl("max-age=31536000"); } else { metadata.setCacheControl("no-cache, must-revalidate"); } PutObjectRequest putObjectRequest = new PutObjectRequest(websiteBucketName, keyName + ".json", jsonAsStream, metadata); // Data must be public so it can be served from the website putObjectRequest.setCannedAcl(CannedAccessControlList.PublicRead); IS3TransferManager transferManager = getS3TransferManager(); TransferUtils.waitForS3Transfer(transferManager.upload(putObjectRequest), logger); logger.log("Uploaded cached json data to S3 bucket"); } catch (AmazonServiceException ase) { ExceptionUtils.logAmazonServiceException(ase, logger); throw new Exception("Exception caught while copying json data to S3"); } catch (AmazonClientException ace) { ExceptionUtils.logAmazonClientException(ace, logger); throw new Exception("Exception caught while copying json data to S3"); } catch (InterruptedException e) { logger.log("Caught interrupted exception: "); logger.log("Error Message: " + e.getMessage()); throw new Exception("Exception caught while copying json data to S3"); } }
/**
 * Uploads the named classpath resource to the bucket as text/html under the
 * same name, then marks it publicly readable.
 */
private void upload(final String name) {
    final ObjectMetadata htmlMetadata = new ObjectMetadata();
    htmlMetadata.setContentType("text/html");
    // try-with-resources closes the classpath stream even when the put fails.
    try (InputStream resource = this.getClass().getResourceAsStream(name)) {
        this.s3.putObject(new PutObjectRequest(BucketName, name, resource, htmlMetadata));
    } catch (final IOException e) {
        throw new RuntimeException("cannot upload " + name, e);
    }
    // ACL is applied after the upload succeeds.
    this.s3.setObjectAcl(BucketName, name, CannedAccessControlList.PublicRead);
}
/**
 * {@inheritDoc}
 *
 * Validates that metadata, ACL and data are present on the request, uploads
 * the data (closing the stream via try-with-resources), and wraps the result.
 * Any failure is logged and rethrown as AwsException.
 */
@Override
public S3Response uploadFile(S3Request request) {
    ObjectMetadata objectMetadata = Objects.requireNonNull(request.getMetadata(), OBJ_METADATA_NULL_MSG);
    CannedAccessControlList cannedACL = Objects.requireNonNull(request.getCannedACL(), ACL_NULL_MSG);
    try (InputStream data = Objects.requireNonNull(request.getData(), DATA_NULL_MSG);) {
        PutObjectRequest putRequest = new PutObjectRequest(
                request.getBucketName(), request.getKey(), data, objectMetadata)
                .withCannedAcl(cannedACL);
        PutObjectResult putResult = this.s3Client.putObject(putRequest);
        return new S3Response().withPutObjectResult(putResult);
    } catch (Exception ex) {
        LOGGER.error("Exception while uploading file!!", ex);
        throw new AwsException(ex.getMessage(), ex);
    }
}
/**
 * Multipart upload endpoint: extracts data, bucket, key and canned-ACL parts,
 * builds an S3Request with content-length metadata, and delegates to the
 * storage service. Failures become a 500 JaxRSException.
 */
@POST
@Path(PATH_UPLOAD)
@Consumes(MULTIPART_FORM_DATA)
@RequiresJwt
public Response uploadFile(MultipartFormDataInput multipart) {
    try {
        byte[] data = multipart.getFormDataPart(PARAM_DATA, byte[].class, null);
        String bucketName = multipart.getFormDataPart(PARAM_BUCKET_NAME, String.class, null);
        String key = multipart.getFormDataPart(PARAM_KEY, String.class, null);
        String cannedACL = multipart.getFormDataPart(PARAM_CANNED_ACL, String.class, null);

        // S3 requires the content length up front for stream uploads.
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength((long) data.length);

        S3Request s3Request = S3Request.builder()
                .bucketName(bucketName)
                .key(key)
                .data(IOUtils.buffer(new ByteArrayInputStream(data)))
                .metadata(metadata)
                .cannedACL(CannedAccessControlList.valueOf(cannedACL))
                .build();
        this.storageService.uploadFile(s3Request);

        return Response.ok("File uploaded successfully!!").build();
    } catch (Exception ex) {
        throw JaxRSException.builder()
                .message(ex.getMessage())
                .cause(ex)
                .status(STATUS_SERVER_ERROR)
                .logException(true)
                .build();
    }
}