... do |attempts, response|
  puts "#{attempts}; timestamp this time: #{response.password_data.timestamp}"
end
proc = Proc.new do |attempts, response|
  logger.debug "Waiting for password data timestamp to be newer", {pwd_timestamp: instance.password_data.timestamp, newer_than: since}
end

instance.wait_until(max_attempts: 30, delay: 10, before_wait: proc) { |i| i.password_data.timestamp != since }
Here, i is an Instance resource.
w.max_attempts = 15
w.delay = 0
w.before_attempt do |a|
  pause_exponentially(a)
  logger.debug "Waiting for password...", {instance_id: id, attempt_count: a}
end
# sleeps seed * exp**n seconds, so the pause grows with each attempt
def pause_exponentially(n, seed=2, exp=1.4)
  sleep(seed * (exp ** n))
end
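To put that in context, here's roughly how the config above would be wired into a client wait_until call; the region, instance id, and the :password_data_available waiter name are placeholders/assumptions on my part, not from the snippet itself:

require 'aws-sdk'

client = Aws::EC2::Client.new(region: 'us-west-2')
id = 'i-1234567890abcdef0' # placeholder instance id

client.wait_until(:password_data_available, instance_id: id) do |w|
  w.max_attempts = 15
  w.delay = 0                 # let before_attempt control the pacing
  w.before_attempt do |a|
    pause_exponentially(a)    # exponential backoff helper defined above
  end
end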
There isn't currently an accessible way to change the default waiter values. The waiter provider is a private interface and is subject to change. If you are comfortable poking into something that may change in the future, you could do the following:
Aws::EC2::Client.waiters.instance_variable_get("@waiters")[:instance_running][:delay] = 20
I’d be open to suggestions on how to make this more flexible; until then, I've left the interfaces marked @api private.
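For a one-off call you can also override the delay without touching the private defaults, using the same block form shown earlier (the instance id here is just a placeholder):

ec2 = Aws::EC2::Client.new(region: 'us-west-2')

ec2.wait_until(:instance_running, instance_ids: ['i-1234567890abcdef0']) do |w|
  w.delay = 20        # per-call override instead of patching the default
  w.max_attempts = 40
end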
Is there an exists? method? The v2 client does not have one, but Aws::EC2::Instance does. The API reference doesn't show either Instance or Image having an attribute that sounds like 'exists', other than maybe state on Instance.
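For reference, a quick sketch of checking through the resource interface (region and id are placeholders):

require 'aws-sdk'

ec2 = Aws::EC2::Resource.new(region: 'us-west-2')
instance = ec2.instance('i-1234567890abcdef0')

puts instance.exists?     # resource-level existence check
puts instance.state.name  # closest API-level signal, e.g. "running" or "terminated"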
... the aws-sdk gem to make the API calls.
Aws.config[:dynamodb][:stub_responses] = {put_item: 'ProvisionedThroughputExceededException'}
allow_any_instance_of(Aws::DynamoDB::Client).to receive(:put_item).
  with(hash_including(table_name: 'my-table')).
  and_raise(Aws::DynamoDB::Errors::ProvisionedThroughputExceededException)
When raising Aws::DynamoDB::Errors::ProvisionedThroughputExceededException, I kept getting an ArgumentError back (ArgumentError: wrong number of arguments (0 for 2)). I finally figured out that this had to do with the constructor of the error. Perhaps this is expected, but I didn’t realize it. Setting nil for both params made this work:

allow_any_instance_of(Aws::DynamoDB::Client).to receive(:put_item).
  with(hash_including(table_name: "Users")).
  and_raise(Aws::DynamoDB::Errors::ProvisionedThroughputExceededException.new(nil, nil))
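In case it helps, the two constructor args are the request context and the message, so passing a message string instead of the second nil gives a more readable failure if the spec trips over the exception (just a sketch, same stub as above):

error = Aws::DynamoDB::Errors::ProvisionedThroughputExceededException.new(
  nil,                    # request context, not needed when raising from a spec
  'simulated throttling'  # message shown wherever the exception surfaces
)

allow_any_instance_of(Aws::DynamoDB::Client).to receive(:put_item).
  with(hash_including(table_name: "Users")).
  and_raise(error)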
hey guys, for the most part, the response stubbing is working great! I believe I am seeing an issue, however, when a test runs code that makes more than one DynamoDB request and the second or third request is the one with the stubbed response.

For example, I have a method in my code that does a query request to DynamoDB, followed by a get_item. If I stub the query method like so: Aws.config[:dynamodb][:stub_responses] = {query: 'ProvisionedThroughputExceededException'}, the exception is raised and the right thing happens. This first test works fine.

My second test is to allow the query to succeed and instead have the get_item request raise ProvisionedThroughputExceededException. So the stub looks like this: Aws.config[:dynamodb][:stub_responses] = {get_item: 'ProvisionedThroughputExceededException'}. When I run this test (standalone and separate from the first one), the query returns nothing, even though there are objects to return (i.e. if I comment out the response stubbing line, the query works). So it seems as if my response stub for get_item is affecting my query request.

Do you guys have any thoughts on this? Thanks!
ddb = Aws::DynamoDB::Client.new
allow(ddb).to receive(:query).and_return(ddb.stub_data(:query, {}))
allow(ddb).to receive(:get_item).and_raise(Aws::DynamoDB::Errors::ProvisionedThroughputExceededException.new(nil, nil))
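For what it's worth, the reason the original approach broke the query is that enabling stub_responses puts the whole client into stub mode, so every operation returns stubbed (empty by default) data, not just the ones you list. If you'd rather stay with the built-in stubbing instead of rspec doubles, per-operation stubs on a single client instance should also work (a sketch; the empty query result is just a placeholder):

ddb = Aws::DynamoDB::Client.new(stub_responses: true)
ddb.stub_responses(:query, items: [], count: 0)                          # query succeeds with canned (empty) data
ddb.stub_responses(:get_item, 'ProvisionedThroughputExceededException')  # get_item raises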
lambda = Aws::Lambda::Client.new(access_key_id: '***',
                                 secret_access_key: '***',
                                 region: 'us-west-2')

@function_name = '***'
@s3_jar_path = '***.jar'

s3 = Aws::S3::Client.new(access_key_id: '***',
                         secret_access_key: '***',
                         http_wire_trace: true,
                         region: 'us-east-1')

# download the jar from S3, then push it to Lambda
lambda_jar = s3.get_object(bucket: '****', key: @s3_jar_path)

lambda.update_function_configuration({
  function_name: @function_name,
  role: "***",
  handler: "***",
  timeout: 15,
  memory_size: 512,
})

lambda.update_function_code({
  function_name: @function_name,
  zip_file: lambda_jar.body
})
permanent_aws_creds = Aws::SharedCredentials.new(profile_name: 'ABCD')

cognitoIdentityClient = Aws::CognitoIdentity::Client.new(
  region: 'us-east-1',
  credentials: permanent_aws_creds,
)

developerProviderName = '1. Got from Developer Provider Name under Custom in the Cognito console'
IdentityTokenProvider = '2. Where do I get this from?'
identityPoolId = 'us-east-1:Xxxxx'

resp = cognitoIdentityClient.get_open_id_token_for_developer_identity(
  identity_pool_id: identityPoolId,
  logins: {
    developerProviderName => IdentityTokenProvider
  }
)

resp2 = cognitoIdentityClient.get_credentials_for_identity(
  {
    identity_id: resp['identity_id'],
    logins: {
      developerProviderName => resp['token'] # Am I passing in the right token?
    }
  }
)