Skip to content

Commit

Permalink
Merge branch 'main' into zhessler-fix-const-names-in-codegen
Browse files Browse the repository at this point in the history
  • Loading branch information
Velfi authored May 14, 2024
2 parents 0baea0b + 1117dc7 commit ce189b0
Show file tree
Hide file tree
Showing 12 changed files with 157 additions and 49 deletions.
29 changes: 28 additions & 1 deletion CHANGELOG.next.toml
Original file line number Diff line number Diff line change
Expand Up @@ -9,4 +9,31 @@
# message = "Fix typos in module documentation for generated crates"
# references = ["smithy-rs#920"]
# meta = { "breaking" = false, "tada" = false, "bug" = false, "target" = "client | server | all"}
# author = "rcoh"
# author = "rcoh"


[[aws-sdk-rust]]
message = "Fix panics that occurred when `Duration` for exponential backoff could not be created from too big a float."
references = ["aws-sdk-rust#1133"]
meta = { "breaking" = false, "tada" = false, "bug" = true }
author = "ysaito1001"

[[smithy-rs]]
message = "Fix panics that occurred when `Duration` for exponential backoff could not be created from too big a float."
references = ["aws-sdk-rust#1133"]
meta = { "breaking" = false, "tada" = false, "bug" = true, "target" = "client" }
author = "ysaito1001"

[[smithy-rs]]
message = "Clients now enforce that the Content-Length sent by the server matches the length of the returned response body. In most cases, Hyper will enforce this behavior, however, in extremely rare circumstances where the Tokio runtime is dropped in between subsequent requests, this scenario can occur."
references = ["smithy-rs#3491", "aws-sdk-rust#1079"]
meta = { "breaking" = false, "bug" = true, "tada" = false, "target" = "client" }
author = "rcoh"

[[aws-sdk-rust]]
message = "Clients now enforce that the Content-Length sent by the server matches the length of the returned response body. In most cases, Hyper will enforce this behavior, however, in extremely rare circumstances where the Tokio runtime is dropped in between subsequent requests, this scenario can occur."
references = ["aws-sdk-rust#1079"]
meta = { "breaking" = false, "bug" = true, "tada" = false }
author = "rcoh"


12 changes: 10 additions & 2 deletions aws/sdk/integration-tests/s3/tests/endpoints.rs
Original file line number Diff line number Diff line change
Expand Up @@ -151,8 +151,16 @@ async fn write_get_object_response() {
);

let captured_request = req.expect_request();
// Strip the query string before comparing: query-parameter order is not
// guaranteed, so only the scheme/host/path portion is asserted on.
// `splitn(2, '?')` already yields an iterator — no `.into_iter()` needed.
let uri_no_query = captured_request
    .uri()
    .splitn(2, '?')
    .next()
    .unwrap()
    .to_string();

assert_eq!(
    uri_no_query,
    "https://req-route.s3-object-lambda.us-west-4.amazonaws.com/WriteGetObjectResponse"
);
}
14 changes: 13 additions & 1 deletion aws/sdk/integration-tests/s3/tests/express.rs
Original file line number Diff line number Diff line change
Expand Up @@ -270,7 +270,19 @@ async fn default_checksum_should_be_crc32_for_operation_requiring_checksum() {
.send()
.await;

http_client.assert_requests_match(&[""]);
let checksum_headers: Vec<_> = http_client
.actual_requests()
.last()
.unwrap()
.headers()
.iter()
.filter(|(key, _)| key.starts_with("x-amz-checksum"))
.collect();

assert_eq!(1, checksum_headers.len());
assert_eq!("x-amz-checksum-crc32", checksum_headers[0].0);
// FIXME(V1373841114): re-enable assertion after model updates
// http_client.assert_requests_match(&[""]);
}

#[tokio::test]
Expand Down
23 changes: 11 additions & 12 deletions aws/sdk/integration-tests/s3/tests/select-object-content.rs
Original file line number Diff line number Diff line change
Expand Up @@ -11,9 +11,7 @@ use aws_sdk_s3::types::{
OutputSerialization, SelectObjectContentEventStream,
};
use aws_sdk_s3::Client;
use aws_smithy_protocol_test::{assert_ok, validate_body, MediaType};
use aws_smithy_runtime::client::http::test_util::dvr::{Event, ReplayingClient};
use std::error::Error;

#[tokio::test]
async fn test_success() {
Expand Down Expand Up @@ -84,16 +82,17 @@ async fn test_success() {
received
);

// FIXME(V1373841114): re-enable assertion after model updates
// Validate the requests
replayer
.validate(&["content-type", "content-length"], body_validator)
.await
.unwrap();
// replayer
// .validate(&["content-type", "content-length"], body_validator)
// .await
// .unwrap();
}

fn body_validator(expected_body: &[u8], actual_body: &[u8]) -> Result<(), Box<dyn Error>> {
let expected = std::str::from_utf8(expected_body).unwrap();
let actual = std::str::from_utf8(actual_body).unwrap();
assert_ok(validate_body(actual, expected, MediaType::Xml));
Ok(())
}
// fn body_validator(expected_body: &[u8], actual_body: &[u8]) -> Result<(), Box<dyn Error>> {
// let expected = std::str::from_utf8(expected_body).unwrap();
// let actual = std::str::from_utf8(actual_body).unwrap();
// assert_ok(validate_body(actual, expected, MediaType::Xml));
// Ok(())
// }
2 changes: 0 additions & 2 deletions aws/sdk/integration-tests/s3/tests/streaming-response.rs
Original file line number Diff line number Diff line change
Expand Up @@ -18,9 +18,7 @@ use std::net::SocketAddr;
use std::time::Duration;
use tracing::debug;

// TODO(https://github.com/smithy-lang/smithy-rs/issues/3523): Unignore this test
#[tokio::test]
#[ignore]
async fn test_too_short_body_causes_an_error() {
// this is almost impossible to reproduce with Hyper—you need to do stuff like run each request
// in its own async runtime. But there's no reason a customer couldn't run their _own_ HttpClient
Expand Down
2 changes: 1 addition & 1 deletion rust-runtime/aws-smithy-runtime/Cargo.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[package]
name = "aws-smithy-runtime"
version = "1.5.0"
version = "1.5.2"
authors = ["AWS Rust SDK Team <[email protected]>", "Zelda Hessler <[email protected]>"]
description = "The new smithy runtime crate"
edition = "2021"
Expand Down
5 changes: 1 addition & 4 deletions rust-runtime/aws-smithy-runtime/src/client/defaults.rs
Original file line number Diff line number Diff line change
Expand Up @@ -204,8 +204,6 @@ fn default_stalled_stream_protection_config_plugin_v2(
)
}

// TODO(https://github.com/smithy-lang/smithy-rs/issues/3523)
#[allow(dead_code)]
/// Returns the `EnforceContentLengthRuntimePlugin`, wrapped as a shared plugin.
///
/// Always produces `Some`; the `Option` return type matches the shape the
/// default-plugin list expects.
fn enforce_content_length_runtime_plugin() -> Option<SharedRuntimePlugin> {
    let plugin = EnforceContentLengthRuntimePlugin::new();
    Some(plugin.into_shared())
}
Expand Down Expand Up @@ -286,8 +284,7 @@ pub fn default_plugins(
default_sleep_impl_plugin(),
default_time_source_plugin(),
default_timeout_config_plugin(),
// TODO(https://github.com/smithy-lang/smithy-rs/issues/3523): Reenable this
/* enforce_content_length_runtime_plugin(), */
enforce_content_length_runtime_plugin(),
default_stalled_stream_protection_config_plugin_v2(behavior_version),
]
.into_iter()
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -138,16 +138,17 @@ impl StandardRetryStrategy {
} else {
fastrand::f64()
};
let backoff = calculate_exponential_backoff(
Ok(calculate_exponential_backoff(
// Generate a random base multiplier to create jitter
base,
// Get the backoff time multiplier in seconds (with fractional seconds)
retry_cfg.initial_backoff().as_secs_f64(),
// `self.local.attempts` tracks number of requests made including the initial request
// The initial attempt shouldn't count towards backoff calculations, so we subtract it
request_attempts - 1,
);
Ok(Duration::from_secs_f64(backoff).min(retry_cfg.max_backoff()))
// Maximum backoff duration as a fallback to prevent overflow when calculating a power
retry_cfg.max_backoff(),
))
}
}
RetryAction::RetryForbidden | RetryAction::NoActionIndicated => {
Expand Down Expand Up @@ -288,11 +289,29 @@ fn check_rate_limiter_for_delay(
None
}

fn calculate_exponential_backoff(base: f64, initial_backoff: f64, retry_attempts: u32) -> f64 {
2_u32
fn calculate_exponential_backoff(
base: f64,
initial_backoff: f64,
retry_attempts: u32,
max_backoff: Duration,
) -> Duration {
let result = match 2_u32
.checked_pow(retry_attempts)
.map(|backoff| (backoff as f64) * base * initial_backoff)
.unwrap_or(f64::MAX)
.map(|power| (power as f64) * initial_backoff)
{
Some(backoff) => match Duration::try_from_secs_f64(backoff) {
Ok(result) => result.min(max_backoff),
Err(e) => {
tracing::warn!("falling back to {max_backoff:?} as `Duration` could not be created for exponential backoff: {e}");
max_backoff
}
},
None => max_backoff,
};

// Apply jitter to `result`, and note that it can be applied to `max_backoff`.
// Won't panic because `base` is either in range 0..1 or a constant 1 in testing (if configured).
result.mul_f64(base)
}

fn get_seconds_since_unix_epoch(runtime_components: &RuntimeComponents) -> f64 {
Expand Down Expand Up @@ -425,6 +444,23 @@ mod tests {
assert_eq!(ShouldAttempt::No, actual);
}

// Regression test for aws-sdk-rust#1133: a retry-attempt count large enough to
// overflow the exponential backoff computation must fall back to the maximum
// backoff instead of panicking.
#[test]
fn should_not_panic_when_exponential_backoff_duration_could_not_be_created() {
let (ctx, rc, cfg) = set_up_cfg_and_context(
ErrorKind::TransientError,
// 33 attempts means `retry_attempts` of 32 after the initial attempt is
// subtracted in `calculate_backoff`; `2_u32.checked_pow(32)` overflows,
// forcing the `max_backoff` fallback in `calculate_exponential_backoff`
33,
RetryConfig::standard()
.with_use_static_exponential_base(true)
.with_max_attempts(100), // Any value greater than 33 will do
);
let strategy = StandardRetryStrategy::new();
let actual = strategy
.should_attempt_retry(&ctx, &rc, &cfg)
.expect("method is infallible for this use");
// The fallback path must yield the configured maximum backoff, not a panic.
assert_eq!(ShouldAttempt::YesAfterDelay(MAX_BACKOFF), actual);
}

#[derive(Debug)]
struct ServerError;
impl fmt::Display for ServerError {
Expand Down Expand Up @@ -868,14 +904,16 @@ mod tests {
assert_eq!(token_bucket.available_permits(), 480);
}

// Cap used by the backoff tests; large enough not to clamp the expected values.
const MAX_BACKOFF: Duration = Duration::from_secs(20);

/// With `base == 1.0` (no jitter) and a 1s initial backoff, successive
/// attempts double the delay: 1s, 2s, 4s.
#[test]
fn calculate_exponential_backoff_where_initial_backoff_is_one() {
    let initial_backoff = 1.0;

    for (attempt, expected_backoff) in [initial_backoff, 2.0, 4.0].into_iter().enumerate() {
        let actual_backoff =
            calculate_exponential_backoff(1.0, initial_backoff, attempt as u32, MAX_BACKOFF);
        assert_eq!(Duration::from_secs_f64(expected_backoff), actual_backoff);
    }
}

Expand All @@ -885,8 +923,8 @@ mod tests {

for (attempt, expected_backoff) in [initial_backoff, 6.0, 12.0].into_iter().enumerate() {
let actual_backoff =
calculate_exponential_backoff(1.0, initial_backoff, attempt as u32);
assert_eq!(expected_backoff, actual_backoff);
calculate_exponential_backoff(1.0, initial_backoff, attempt as u32, MAX_BACKOFF);
assert_eq!(Duration::from_secs_f64(expected_backoff), actual_backoff);
}
}

Expand All @@ -896,17 +934,17 @@ mod tests {

for (attempt, expected_backoff) in [initial_backoff, 0.06, 0.12].into_iter().enumerate() {
let actual_backoff =
calculate_exponential_backoff(1.0, initial_backoff, attempt as u32);
assert_eq!(expected_backoff, actual_backoff);
calculate_exponential_backoff(1.0, initial_backoff, attempt as u32, MAX_BACKOFF);
assert_eq!(Duration::from_secs_f64(expected_backoff), actual_backoff);
}
}

/// An absurdly large retry count overflows `2^retry_attempts`; the function
/// must gracefully return `max_backoff` rather than panic.
#[test]
fn calculate_backoff_overflow_should_gracefully_fallback_to_max_backoff() {
    // avoid overflow for a silly large amount of retry attempts
    assert_eq!(
        MAX_BACKOFF,
        calculate_exponential_backoff(1_f64, 10_f64, 100000, MAX_BACKOFF),
    );
}
}
2 changes: 1 addition & 1 deletion tools/ci-build/runtime-versioner/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ opt-level = 0
[dependencies]
anyhow = "1.0.75"
camino = "1.1.6"
clap = { version = "4.4.11", features = ["derive"] }
clap = { version = "4.4.11", features = ["derive", "env"] }
crates-index = "2.3.0"
indicatif = "0.17.7"
reqwest = { version = "0.11.22", features = ["blocking"] }
Expand Down
4 changes: 3 additions & 1 deletion tools/ci-build/runtime-versioner/src/command/audit.rs
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,9 @@ pub fn audit(args: Audit) -> Result<()> {
let previous_release_tag =
previous_release_tag(&repo, &release_tags, args.previous_release_tag.as_deref())?;
if release_tags.first() != Some(&previous_release_tag) {
tracing::warn!("there are newer releases since '{previous_release_tag}'");
tracing::warn!("there are newer releases since '{previous_release_tag}'. \
Consider specifying a more recent release tag using the `--previous-release-tag` command-line argument or \
the `SMITHY_RS_RUNTIME_VERSIONER_AUDIT_PREVIOUS_RELEASE_TAG` environment variable if audit fails.");
}

let next_crates = discover_runtime_crates(&repo.root).context("next")?;
Expand Down
2 changes: 1 addition & 1 deletion tools/ci-build/runtime-versioner/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ pub struct Audit {
#[arg(long)]
no_fetch: bool,
/// Explicitly state the previous release's tag. Discovers it if not provided.
#[arg(long)]
#[arg(long, env = "SMITHY_RS_RUNTIME_VERSIONER_AUDIT_PREVIOUS_RELEASE_TAG")]
previous_release_tag: Option<String>,
/// Path to smithy-rs. Defaults to current working directory.
#[arg(long)]
Expand Down
41 changes: 34 additions & 7 deletions tools/ci-build/runtime-versioner/src/tag.rs
Original file line number Diff line number Diff line change
Expand Up @@ -24,8 +24,27 @@ pub fn previous_release_tag(
if !release_tags.contains(&tag_override) {
bail!("specified tag '{tag_override}' doesn't exist");
}
if !tag_is_ancestor(repo, &tag_override)? {
bail!("specified tag '{tag_override}' is not an ancestor to HEAD");
let previous_release_commit = commit_for_tag(repo, &ancestor_tag)?;
let previous_release_override_commit = commit_for_tag(repo, &tag_override)?;
// The first guard says that whenever we override a previous release tag, `HEAD` of our branch
// should be ahead of that override.
// The second guard handles a case where our branch is now behind w.r.t the latest smithy-rs
// release. This can happen when we `git merge main` into the branch, but the main branch
// now contains a new smithy-rs release that the `HEAD` of the branch doesn't know about.
// In that case, we want to teach the tool that the previous release should now be the new
// release. However, specifying the new release for `previous_release_override_commit` fails
// with the first guard because `HEAD` doesn't know about the new release. The second guard
// provides an escape hatch where if `previous_release_commit` (the latest release currently
// `HEAD` does know about) is the ancestor to the specified previous release override, then
// we can now treat the previous release override as a legitimate previous release.
if !is_ancestor(repo, &previous_release_override_commit, "HEAD")?
&& !is_ancestor(
repo,
&previous_release_commit,
&previous_release_override_commit,
)?
{
bail!("specified tag '{tag_override}' is neither the ancestor of HEAD nor a descendant of {ancestor_tag}");
}
if tag_override != ancestor_tag {
warn!(
Expand Down Expand Up @@ -83,19 +102,27 @@ pub fn release_tags(repo: &Repo) -> Result<Vec<ReleaseTag>> {
Ok(tags)
}

/// True if the given tag is an ancestor to HEAD.
fn tag_is_ancestor(repo: &Repo, tag: &ReleaseTag) -> Result<bool> {
let commit = commit_for_tag(repo, tag)?;
/// True if the given `ancestor_commit` is an ancestor to `descendant_commit`
fn is_ancestor(repo: &Repo, ancestor_commit: &str, descendant_commit: &str) -> Result<bool> {
let status = repo
.git(["merge-base", "--is-ancestor", &commit, "HEAD"])
.expect_status_one_of("determine if a tag is the ancestor to HEAD", [0, 1])?;
.git([
"merge-base",
"--is-ancestor",
ancestor_commit,
descendant_commit,
])
.expect_status_one_of(
"determine if {ancestor_commit} is the ancestor to {descendant_commit}",
[0, 1],
)?;
Ok(status == 0)
}

/// Returns the commit hash for the given tag
fn commit_for_tag(repo: &Repo, tag: &ReleaseTag) -> Result<String> {
repo.git(["rev-list", "-n1", tag.as_str()])
.expect_success_output("retrieve commit hash for tag")
.map(|result| result.trim().to_string())
}

#[cfg(test)]
Expand Down

0 comments on commit ce189b0

Please sign in to comment.