AWSTemplateFormatVersion: "2010-09-09" Description: >- Template to create an AWS Batch job queue and supporting resources for use with the remote batch job submission functionality in the Wolfram Language ############ # METADATA # ############ Metadata: Author: "Wolfram Research" Version: 2.0.0 AWS::CloudFormation::Interface: ParameterGroups: - Label: default: "AWS Batch compute environment settings" Parameters: - ComputeEnvironmentMaxVCPUs - ComputeEnvironmentInstanceTypes - ComputeEnvironmentVPC - ComputeEnvironmentSubnets - Label: default: "AWS Batch job definition settings" Parameters: - JobDefinitionDefaultTimeout - JobDefinitionDefaultVCPUCount - JobDefinitionDefaultMemory - JobDefinitionDefaultGPUCount - JobDefinitionContainerImageStandard - JobDefinitionContainerImageCUDA - Label: default: "S3 bucket settings" Parameters: - IOBucketLifecycleExpiration - Label: default: "Access control settings" Parameters: - IAMCreateUserPolicy - IAMExistingBatchServiceRoleARN - Label: default: "Advanced settings" Parameters: - ECSCleanupCachedImages - ComputeEnvironmentEC2KeyPair - ComputeEnvironmentVPCSecurityGroup ParameterLabels: ComputeEnvironmentMaxVCPUs: default: "Maximum vCPUs" ComputeEnvironmentInstanceTypes: default: "Available instance types" ComputeEnvironmentVPC: default: "VPC" ComputeEnvironmentSubnets: default: "VPC subnets" ComputeEnvironmentVPCSecurityGroup: default: "Compute environment VPC security group" ComputeEnvironmentEC2KeyPair: default: "Compute environment SSH key pair" JobDefinitionDefaultTimeout: default: "Default job timeout" JobDefinitionDefaultVCPUCount: default: "Default vCPU requirement" JobDefinitionDefaultMemory: default: "Default memory requirement" JobDefinitionDefaultGPUCount: default: "Default GPU requirement" JobDefinitionContainerImageStandard: default: "Job container image (standard jobs)" JobDefinitionContainerImageCUDA: default: "Job container image (GPU jobs)" IOBucketLifecycleExpiration: default: "I/O bucket object expiration" IAMCreateUserPolicy: default: "Create end-user access policy" IAMExistingBatchServiceRoleARN: default: "AWS Batch service IAM role" ECSCleanupCachedImages: default: "Cached image cleanup" ############## # PARAMETERS # ############## Parameters: ####################### # Compute environment # ####################### ComputeEnvironmentMaxVCPUs: Description: >- Maximum number of vCPUs to allow the compute environment to scale to. Type: Number MinValue: 0 Default: 96 ComputeEnvironmentInstanceTypes: Description: >- Jobs define their vCPU, memory, and GPU requirements at submission, and that information is used to match the job with the most appropriately sized instance from the types (such as "c5.8xlarge") or families (such as "c5", "c5n", "g4dn") in this comma-delimited list. You can also specify the string "optimal" to pick instance types (from the C, M, and R instance families) on the fly that best match the demand of your submitted jobs. Note that not all instance families or types are available for use with AWS Batch. Type: CommaDelimitedList Default: "c5, m5, r5, p3" ComputeEnvironmentVPC: Description: >- The ID of a VPC (like "vpc-12924e85") into which instances should be launched. If left at the default, your account's default VPC will be used. Type: String Default: "DefaultVPC" ComputeEnvironmentSubnets: Description: >- A comma-delimited list of VPC subnet IDs (like "subnet-8d3f68c0,subnet-0c8ce2d2f831fe33c") in the selected VPC into which batch compute instances should be launched. Note that every subnet is linked to an availability zone, and some instance types may not be not available in certain zones. If left at the default, all subnets in the selected VPC will be used. In either case, the stack will create an AWS Lambda function and use it to fetch metadata about the selected VPC and subnets. Type: CommaDelimitedList Default: "AllSubnetsInVPC" ComputeEnvironmentVPCSecurityGroup: Description: >- The ID of an existing security group, like "sg-08bd4f5af4c05882c", to associate with instances launched in the compute environment. If specifying a custom security group, you must ensure that it is in the selected VPC and is configured to allow egress on all ports to the public internet for Wolfram Engine licensing, automatic updates, and access to Wolfram Cloud services. Egress must also be allowed to the Docker registry hosting the job container image (e.g. Docker Hub). If left at the default, a new security group allowing unrestricted egress and no ingress will be automatically created in the selected VPC. Type: String Default: "CreateNewSecurityGroup" ComputeEnvironmentEC2KeyPair: Description: >- An existing EC2 key pair name, like "MyLaptopKey", to associate with instances launched in the compute environment. In most cases no key pair is needed, but having SSH access can be useful for debugging. Type: String Default: "NoKeyPair" ################## # Job definition # ################## JobDefinitionDefaultTimeout: Description: >- The time duration in seconds after which to terminate running jobs, measured from the instant a job transitions into the "Running" state. For array jobs, this value applies to each array child job. This value is the default used if not overridden with the TimeConstraint option to RemoteBatchSubmit and RemoteBatchMapSubmit. Type: Number MinValue: 60 Default: 86400 JobDefinitionDefaultVCPUCount: Description: >- The number of vCPUs reserved for each job. This value is the default used if not overridden with the "VCPUCount" key in the RemoteProviderSettings option to RemoteBatchSubmit and RemoteBatchMapSubmit. Type: Number MinValue: 1 Default: 1 JobDefinitionDefaultMemory: Description: >- The hard limit (in MiB) of memory to present to job containers. If a job exceeds this limit, the job container is killed. This value is the default used if not overridden with the "Memory" key in the RemoteProviderSettings option to RemoteBatchSubmit and RemoteBatchMapSubmit. Type: Number MinValue: 512 Default: 4096 # empirically determined minimum for GPU JobDefinitionDefaultGPUCount: Description: >- The number of GPUs reserved for each job. This value is the default used if not overridden with the "GPUCount" key in the RemoteProviderSettings option to RemoteBatchSubmit and RemoteBatchMapSubmit. In order to use GPU scheduling, the compute environment must include at least one GPU-capable instance type, such as those in the "p3" or "g4dn" families. Type: Number MinValue: 0 Default: 0 JobDefinitionContainerImageStandard: Description: >- The Docker image to create job containers with for non-GPU jobs. "Automatic" dynamically chooses the latest Wolfram Engine image version that is compatible with the active compute environment at the time of job submission. The image can also be pinned to a specific Wolfram Language version like "wolframresearch/wolframengine:13.0". For information on Wolfram Engine image tags, see: https://hub.docker.com/r/wolframresearch/wolframengine Type: String Default: "Automatic" JobDefinitionContainerImageCUDA: Description: >- The Docker image to create job containers with when the requested GPU count for a job is >= 1. "Automatic" statically pins the latest CUDA-enabled Wolfram Engine image version that is compatible with the active compute environment at the time of stack creation/update. The image can also be manually pinned to a specific Wolfram Language version like "wolframresearch/wolframengine:13.0-cuda". For information on Wolfram Engine image tags, see: https://hub.docker.com/r/wolframresearch/wolframengine Type: String Default: "Automatic" ####### # S3 # ####### IOBucketLifecycleExpiration: Description: >- Number of days after creation to automatically delete job input and output data from Amazon S3. Type: Number MinValue: 0 Default: 90 ####### # IAM # ####### IAMCreateUserPolicy: Description: >- Whether to create an IAM policy for granting end users access to submit and retrieve jobs. Type: String AllowedValues: - "Yes" - "No" Default: "No" IAMExistingBatchServiceRoleARN: Description: >- ARN of an existing IAM role with the AWSBatchServiceRole managed policy. If unspecified, a new role for this stack will be automatically created. Type: String Default: "AutoCreateNewRole" ################# # ECS/ECS agent # ################# ECSCleanupCachedImages: Description: >- Enable or disable the Amazon ECS agent's automated image cleanup. Disabling cleanup can improve start latency on jobs submitted to already-running instances. Enabling cleanup may be preferable if you intend to also submit non-Wolfram Language jobs to the job queue created by this stack. Type: String AllowedValues: - "Enabled" - "Disabled" Default: "Disabled" ############## # CONDITIONS # ############## Conditions: JobDefinitionGPUCountNonZeroCondition: !Not [!Equals [!Ref JobDefinitionDefaultGPUCount, 0]] JobDefinitionContainerImageStandardAutomaticCondition: !Equals [!Ref JobDefinitionContainerImageStandard, "Automatic"] JobDefinitionContainerImageCUDAAutomaticCondition: !Equals [!Ref JobDefinitionContainerImageCUDA, "Automatic"] IAMCreateUserPolicyCondition: !Equals [!Ref IAMCreateUserPolicy, "Yes"] IAMCreateNewBatchServiceRoleCondition: !Equals [!Ref IAMExistingBatchServiceRoleARN, "AutoCreateNewRole"] ECSDisableCleanupCachedImagesCondition: !Equals [!Ref ECSCleanupCachedImages, "Disabled"] ComputeEnvironmentEC2KeyPairCondition: !Not [!Equals [!Ref ComputeEnvironmentEC2KeyPair, "NoKeyPair"]] ComputeEnvironmentCreateNewSecurityGroupCondition: !Equals [!Ref ComputeEnvironmentVPCSecurityGroup, "CreateNewSecurityGroup"] ############ # MAPPINGS # ############ Mappings: # This indicates the latest Wolfram Engine image version that is # compatible with AWS Batch. # # "Standard" is used for non-GPU jobs. # It is expected that this can always safely be "latest". # # "CUDA" is used for GPU jobs. # This should be the latest image version that is compatible with the # NVIDIA driver version in the GPU-enabled ECS AMI currently used by # AWS Batch. # The current GPU driver version can be determined by creating a new # compute environment with a GPU instance type, scaling it up, # connecting to the launched instance via SSH, and running the # `nvidia-smi` command. # The minimum driver version supported by the current version of the # Wolfram Engine is listed at https://support.wolfram.com/47638. # # When updating this mapping, please also bump the patch component of # the template version. SupportedImages: "Standard": Value: "wolframresearch/wolframengine:latest" "CUDA": Value: "wolframresearch/wolframengine:13.0.0-cuda" ############# # RESOURCES # ############# Resources: ######### # Batch # ######### BatchComputeEnvironment: Type: AWS::Batch::ComputeEnvironment Properties: Type: MANAGED State: "ENABLED" ServiceRole: !If [ IAMCreateNewBatchServiceRoleCondition, !GetAtt BatchServiceRole.Arn, !Ref IAMExistingBatchServiceRoleARN, ] ComputeResources: Type: EC2 AllocationStrategy: "BEST_FIT" MinvCpus: 0 MaxvCpus: !Ref ComputeEnvironmentMaxVCPUs DesiredvCpus: 0 InstanceTypes: !Ref ComputeEnvironmentInstanceTypes InstanceRole: !Ref EC2InstanceProfile LaunchTemplate: LaunchTemplateId: !Ref WolframEC2LaunchTemplate Version: "$Default" Subnets: !GetAtt SelectedVPCMetadata.SubnetIds SecurityGroupIds: !If [ ComputeEnvironmentCreateNewSecurityGroupCondition, [!Ref WolframEC2SecurityGroup], [!Ref ComputeEnvironmentVPCSecurityGroup], ] Ec2KeyPair: !If [ ComputeEnvironmentEC2KeyPairCondition, !Ref ComputeEnvironmentEC2KeyPair, !Ref AWS::NoValue, ] Tags: "Name": !Sub "${AWS::StackName}-BatchComputeInstance" "ComputeEnvironment-StackID": !Ref AWS::StackId "ComputeEnvironment-StackName": !Ref AWS::StackName # these are here mainly to ensure the compute environment # is recreated when the job definitions' images are changed "ComputeEnvironment-SupportedImages-StandardImage": !If [ JobDefinitionContainerImageStandardAutomaticCondition, !FindInMap [SupportedImages, "Standard", "Value"], "ManualImage", ] "ComputeEnvironment-SupportedImages-CUDAImage": !If [ JobDefinitionContainerImageCUDAAutomaticCondition, !FindInMap [SupportedImages, "CUDA", "Value"], "ManualImage", ] # This job definition is used for jobs with GPU count >= 1 submitted # by BatchComputationProvider_AWSBatch paclet versions >= 1.3.0, AND # for all jobs submitted by older paclet versions, regardless of # GPU count. # The logical ID is kept as "WolframJobDefinition" for compatibility # reasons; a more descriptive name would be "WolframJobDefinitionCUDA". WolframJobDefinition: Type: AWS::Batch::JobDefinition UpdateReplacePolicy: Retain Properties: Type: container Parameters: # These are not actually used in the command line. They're stored # as Parameters for convenience, because (unlike Tags) updating # this property doesn't require recreation of the definition. # This is a reference to the standard (no CUDA) version of the # job definition. # BatchComputationProvider_AWSBatch paclet versions >= 1.3.0 # switch to this version automatically for non-GPU jobs, based # on this reference. "Wolfram-RemoteBatchSubmit-JobDefinition-Standard-Link": !Ref WolframJobDefinitionStandard # This is a versioning field for the semantics of the image # support parameter data, so that client-side code can # recognize yet-unknown future versions of the scheme and # avoid misinterpreting them. "Wolfram-RemoteBatchSubmit-JobDefinition-ImageSupport-Epoch": "2022-01-11" RetryStrategy: Attempts: 1 Timeout: AttemptDurationSeconds: !Ref JobDefinitionDefaultTimeout ContainerProperties: Vcpus: !Ref JobDefinitionDefaultVCPUCount Memory: !Ref JobDefinitionDefaultMemory ResourceRequirements: - !If [ JobDefinitionGPUCountNonZeroCondition, { Type: GPU, Value: !Ref JobDefinitionDefaultGPUCount }, !Ref AWS::NoValue, ] Command: - "sh" - "-c" - 'echo "No job command provided." && exit 1' JobRoleArn: !GetAtt BatchJobRole.Arn Image: !If [ JobDefinitionContainerImageCUDAAutomaticCondition, !FindInMap [SupportedImages, "CUDA", "Value"], !Ref JobDefinitionContainerImageCUDA, ] # This job definition is used for jobs with GPU count == 0 submitted # from BatchComputationProvider_AWSBatch paclet versions >= 1.3.0. # It is not used for jobs submitted by older paclet versions. WolframJobDefinitionStandard: Type: AWS::Batch::JobDefinition Properties: Type: container Parameters: {} RetryStrategy: Attempts: 1 Timeout: AttemptDurationSeconds: !Ref JobDefinitionDefaultTimeout ContainerProperties: Vcpus: !Ref JobDefinitionDefaultVCPUCount Memory: !Ref JobDefinitionDefaultMemory ResourceRequirements: # in practice, this will always be overridden to 0 by the client - !If [ JobDefinitionGPUCountNonZeroCondition, { Type: GPU, Value: !Ref JobDefinitionDefaultGPUCount }, !Ref AWS::NoValue, ] Command: - "sh" - "-c" - 'echo "No job command provided." && exit 1' JobRoleArn: !GetAtt BatchJobRole.Arn Image: !If [ JobDefinitionContainerImageStandardAutomaticCondition, !FindInMap [SupportedImages, "Standard", "Value"], !Ref JobDefinitionContainerImageStandard, ] WolframJobQueue: Type: AWS::Batch::JobQueue Properties: #JobQueueName: "Wolfram-Batch-Job-Queue" ComputeEnvironmentOrder: - ComputeEnvironment: !Ref BatchComputeEnvironment Order: 1 Priority: 1 State: "ENABLED" ####### # EC2 # ####### WolframEC2LaunchTemplate: Type: AWS::EC2::LaunchTemplate Properties: #LaunchTemplateName: "Wolfram-RemoteBatchSubmit-Launch-Template" LaunchTemplateData: # TODO: pull the image on startup? UserData: Fn::Base64: !Sub - | MIME-Version: 1.0 Content-Type: multipart/mixed; boundary="==BOUNDARY==" --==BOUNDARY== Content-Type: text/x-shellscript; charset="us-ascii" #!/bin/bash echo ECS_DISABLE_IMAGE_CLEANUP=${DisableImageCleanup} >> /etc/ecs/ecs.config echo ECS_IMAGE_PULL_BEHAVIOR=${ImagePullBehavior} >> /etc/ecs/ecs.config --==BOUNDARY==-- - DisableImageCleanup: !If [ECSDisableCleanupCachedImagesCondition, "true", "false"] ImagePullBehavior: !If [ ECSDisableCleanupCachedImagesCondition, "prefer-cached", "default", ] WolframEC2SecurityGroup: Type: AWS::EC2::SecurityGroup Condition: ComputeEnvironmentCreateNewSecurityGroupCondition Properties: GroupDescription: >- EC2 security group for instances launched by Batch VpcId: !GetAtt SelectedVPCMetadata.VpcId SecurityGroupEgress: - Description: >- Egress to Wolfram Research network for licensing authorization IpProtocol: tcp FromPort: 0 ToPort: 65535 CidrIp: 140.177.0.0/16 - Description: >- Egress to all addresses IpProtocol: "-1" CidrIp: 0.0.0.0/0 ###### # S3 # ###### WolframJobDataBucket: Type: AWS::S3::Bucket #DeletionPolicy: Retain #UpdateReplacePolicy: Retain Properties: LifecycleConfiguration: Rules: - Id: JobDataCleanup Status: Enabled # TODO: filter by tag ExpirationInDays: !Ref IOBucketLifecycleExpiration AbortIncompleteMultipartUpload: DaysAfterInitiation: 7 ####### # IAM # ####### BatchServiceRole: Type: AWS::IAM::Role Condition: IAMCreateNewBatchServiceRoleCondition Properties: Description: >- Role used by Batch to interact with EC2 and ECS Path: "/service-role/" ManagedPolicyArns: - "arn:aws:iam::aws:policy/service-role/AWSBatchServiceRole" AssumeRolePolicyDocument: Version: "2012-10-17" Statement: - Action: "sts:AssumeRole" Effect: "Allow" Principal: Service: "batch.amazonaws.com" EC2InstanceRole: Type: AWS::IAM::Role Properties: # TODO: optionally pick an existing role, once BatchJobPolicy isn't required Description: >- Role applied to EC2 instances started in the Batch compute environment ManagedPolicyArns: - "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role" - !Ref BatchJobPolicy # TODO: remove once AWSClient support ECS task credentials AssumeRolePolicyDocument: Version: "2012-10-17" Statement: - Action: "sts:AssumeRole" Effect: "Allow" Principal: Service: "ec2.amazonaws.com" EC2InstanceProfile: Type: AWS::IAM::InstanceProfile Properties: Roles: - !Ref EC2InstanceRole BatchJobRole: Type: AWS::IAM::Role Properties: Description: >- Role applied to job containers running in the Batch compute environment ManagedPolicyArns: - !Ref BatchJobPolicy AssumeRolePolicyDocument: Version: "2012-10-17" Statement: - Action: "sts:AssumeRole" Effect: "Allow" Principal: Service: "ecs-tasks.amazonaws.com" BatchJobPolicy: Type: AWS::IAM::ManagedPolicy Properties: Description: >- Policy applied to job containers running in the Batch compute environment PolicyDocument: Version: "2012-10-17" Statement: - Resource: !Sub "arn:aws:s3:::${WolframJobDataBucket}/*" Action: - "s3:DeleteObject" - "s3:GetObject" - "s3:PutObject" Effect: "Allow" BatchUserPolicy: Type: AWS::IAM::ManagedPolicy Condition: IAMCreateUserPolicyCondition Properties: Description: >- Policy granting access to a Batch job queue and supporting resources for use with the Wolfram Language PolicyDocument: Version: "2012-10-17" Statement: - Resource: "*" Action: - "batch:DescribeComputeEnvironments" - "batch:DescribeJobDefinitions" - "batch:DescribeJobQueues" - "batch:DescribeJobs" - "batch:ListJobs" - "batch:TerminateJob" Effect: "Allow" - Resource: !Sub "arn:aws:s3:::${WolframJobDataBucket}/*" Action: - "s3:DeleteObject*" - "s3:GetObject*" - "s3:PutObject*" Effect: "Allow" - Resource: !Sub "arn:aws:s3:::${WolframJobDataBucket}" Action: - "s3:ListBucket" Effect: "Allow" - Resource: - !Ref WolframJobQueue # the role of the contrivances below is to construct a # resource string for each job definition where the # version number is the wildcard * instead of the # initial 1 - !Join [ ":", [ !Sub "arn:aws:batch:${AWS::Region}:${AWS::AccountId}", !Select [5, !Split [":", !Ref WolframJobDefinition]], "*", ], ] - !Join [ ":", [ !Sub "arn:aws:batch:${AWS::Region}:${AWS::AccountId}", !Select [5, !Split [":", !Ref WolframJobDefinitionStandard]], "*", ], ] Action: "batch:SubmitJob" Effect: "Allow" - Resource: !Sub "arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/batch/job:*" Action: "logs:GetLogEvents" Effect: "Allow" ################ # VPC metadata # ################ # This is a rather elaborate contraption that serves the purpose of # getting a list of subnets in the selected VPC to feed into the # compute environment specification. I'm unaware of an easier way to # do this in CloudFormation, and Batch doesn't do anything smart like # automatically picking the default subnets if I omit the parameter. # # Said contraption works by creating a Lambda-driven CloudFormation # "custom resource" that represents metadata about the selected VPC. # CloudFormation calls the linked Lambda function, which performs a # DescribeSubnets call to obtain the list of subnets in the VPC, # filtered if applicable to those that the user specified. # This list is then returned to CloudFormation and appears as an # attribute in the SelectedVPCMetadata resource. SelectedVPCMetadata: Type: Custom::SelectedVPCMetadata Properties: ServiceToken: !GetAtt VPCMetadataFunction.Arn VPCID: !Ref ComputeEnvironmentVPC SubnetIDs: !Ref ComputeEnvironmentSubnets VPCMetadataFunction: Type: AWS::Lambda::Function Properties: Code: ZipFile: | const https = require('https'); const url = require('url'); const AWS = require('aws-sdk'); const ec2 = new AWS.EC2({ apiVersion: '2016-11-15' }); exports.handler = function(event, context) { if (event['RequestType'] === 'Delete') { // deletion is a null operation sendResponse(event, context, 'SUCCESS', {}); return; } const properties = event['ResourceProperties']; const inputVPCID = properties['VPCID']; const inputSubnetIDs = properties['SubnetIDs']; if (inputSubnetIDs.length === 0) { sendResponse(event, context, 'FAILED', {}, 'No subnet IDs were supplied. Supply a comma-delimited list of subnet IDs, ' + 'or use the string "AllSubnetsInVPC" to indicate all subnets in the selected VPC.' ); return; } const useDefaultVPC = inputVPCID === 'DefaultVPC'; const useAllSubnets = ( inputSubnetIDs.length === 1 && inputSubnetIDs[0] === 'AllSubnetsInVPC' ); const filters = []; if (useDefaultVPC) { filters.push({ Name: 'default-for-az', Values: ['true'] }); } else { filters.push({ Name: 'vpc-id', Values: [inputVPCID] }); } if (!useAllSubnets) { filters.push({ Name: 'subnet-id', Values: inputSubnetIDs }); } ec2.describeSubnets({ Filters: filters }).promise() .then(function(data) { const subnets = data['Subnets'].slice(0, 16); if (subnets.length >= 1) { sendResponse(event, context, 'SUCCESS', { 'VpcId': subnets[0]['VpcId'], 'SubnetIds': subnets.map(subnet => subnet['SubnetId']) }); } else { if (useDefaultVPC) { sendResponse(event, context, 'FAILED', {}, 'No default subnets are available in this region. Specify a VPC and subnet ' + 'IDs manually, or create a default VPC and subnets with the instructions at ' + 'https://aws.amazon.com/premiumsupport/knowledge-center/recreate-default-vpc/' ); } else { sendResponse(event, context, 'FAILED', {}, 'The supplied list contains no valid subnet IDs in the selected VPC ' + `(${inputVPCID}). Supply a comma-delimited list of subnet IDs, or use ` + 'the string "AllSubnetsInVPC" to indicate all subnets in the selected VPC.' ); } } }) .catch(function(err) { console.error(err); sendResponse(event, context, 'FAILED', {}, `DescribeSubnets request failed (${err})`); }); }; function sendResponse(event, context, responseStatus, resourceAttributes, responseReason) { const responseData = { 'Status': responseStatus, 'PhysicalResourceId': context.invokedFunctionArn, 'StackId': event['StackId'], 'RequestId': event['RequestId'], 'LogicalResourceId': event['LogicalResourceId'], 'NoEcho': false, 'Data': resourceAttributes }; if (responseReason) { responseData['Reason'] = responseReason; } const responseBody = JSON.stringify(responseData); console.log('Response body:\n', responseBody); var parsedUrl = url.parse(event['ResponseURL']); var options = { hostname: parsedUrl.hostname, port: 443, path: parsedUrl.path, method: 'PUT', headers: { 'Content-Type': '', 'Content-Length': responseBody.length } }; var request = https.request(options, function(response) { console.log(`Request status: ${response.statusCode} (${response.statusMessage})`); context.done(); }); request.on('error', function(err) { console.error('Failed executing https.request() with error:'); console.error(err); context.done(); }); request.write(responseBody); request.end(); } Handler: index.handler Runtime: nodejs16.x Timeout: 15 Role: !GetAtt VPCMetadataFunctionExecutionRole.Arn VPCMetadataFunctionExecutionRole: Type: AWS::IAM::Role Properties: AssumeRolePolicyDocument: Version: "2012-10-17" Statement: - Effect: Allow Principal: Service: - lambda.amazonaws.com Action: - sts:AssumeRole Policies: - PolicyName: VPCMetadataFunctionExecutionRolePolicy PolicyDocument: Version: "2012-10-17" Statement: - Effect: Allow Action: - ec2:DescribeSubnets Resource: "*" # TODO: enable with a parameter gate for debugging #- Effect: Allow # Action: # - logs:CreateLogGroup # - logs:CreateLogStream # - logs:PutLogEvents # Resource: arn:aws:logs:*:*:* ########### # OUTPUTS # ########### Outputs: RemoteBatchSubmissionEnvironmentExpression: Description: >- RemoteBatchSubmissionEnvironment expression for use in the Wolfram Language Value: !Sub >- RemoteBatchSubmissionEnvironment["AWSBatch", <| "JobQueue" -> "${WolframJobQueue}", "JobDefinition" -> "${WolframJobDefinition}", "IOBucket" -> "${WolframJobDataBucket}" |>] IAMEndUserPolicy: Condition: IAMCreateUserPolicyCondition Description: >- IAM policy for end-user access to submit and retrieve jobs Value: !Ref BatchUserPolicy