Support S3 unsigned body #3323
base: version-3
```diff
@@ -9,10 +9,10 @@ class ServiceEnumerator
   MANIFEST_PATH = File.expand_path('../../services.json', __FILE__)

   # Minimum `aws-sdk-core` version for new gem builds
-  MINIMUM_CORE_VERSION = "3.239.1"
+  MINIMUM_CORE_VERSION = "3.240.0"

   # Minimum `aws-sdk-core` version for new S3 gem builds
-  MINIMUM_CORE_VERSION_S3 = "3.234.0"
+  MINIMUM_CORE_VERSION_S3 = "3.240.0"

   EVENTSTREAM_PLUGIN = "Aws::Plugins::EventStreamConfiguration"
```

**Comment on lines -12 to +15** (Contributor, PR author): Self-reminder to update the minimum core version when this is ready for release.
```diff
@@ -5,6 +5,7 @@ module Plugins
   # @api private
   class ChecksumAlgorithm < Seahorse::Client::Plugin
     CHUNK_SIZE = 1 * 1024 * 1024 # one MB
+    MIN_CHUNK_SIZE = 16_384 # 16 KB

     # determine the set of supported client side checksum algorithms
     # CRC32c requires aws-crt (optional sdk dependency) for support
```
```diff
@@ -162,9 +163,7 @@ def call(context)
   context[:http_checksum] ||= {}

   # Set validation mode to enabled when supported.
-  if context.config.response_checksum_validation == 'when_supported'
-    enable_request_validation_mode(context)
-  end
+  enable_request_validation_mode(context) if context.config.response_checksum_validation == 'when_supported'

   @handler.call(context)
 end
```
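For context, the guard clause reads the client's `response_checksum_validation` setting. A hedged sketch of how that option is set on a client (stub region and credentials are illustrative; the option and its two values follow the flexible-checksums configuration in recent `aws-sdk-core` releases):

```ruby
require 'aws-sdk-s3'

# Stub credentials so the example is self-contained.
client = Aws::S3::Client.new(
  region: 'us-east-1',
  credentials: Aws::Credentials.new('akid', 'secret'),
  # 'when_supported' (the default) validates a response checksum whenever
  # the service returns one; 'when_required' validates only when the
  # operation's model requires it.
  response_checksum_validation: 'when_supported'
)
```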
```diff
@@ -194,9 +193,7 @@ def call(context)
     calculate_request_checksum(context, request_algorithm)
   end

-  if should_verify_response_checksum?(context)
-    add_verify_response_checksum_handlers(context)
-  end
+  add_verify_response_checksum_handlers(context) if should_verify_response_checksum?(context)

   with_metrics(context.config, algorithm) { @handler.call(context) }
 end
```
```diff
@@ -334,6 +331,11 @@ def calculate_request_checksum(context, checksum_properties)
   if (algorithm_header = checksum_properties[:request_algorithm_header])
     headers[algorithm_header] = checksum_properties[:algorithm]
   end
+
+  # Trailer implementation within JRUBY environment is facing some
+  # network issues that will need further investigation
+  return apply_request_checksum(context, headers, checksum_properties) if defined?(JRUBY_VERSION)
+
   case checksum_properties[:in]
   when 'header'
     apply_request_checksum(context, headers, checksum_properties)
```

**Comment** (Contributor): Maybe link to the possible open issue?
```diff
@@ -346,17 +348,18 @@ def calculate_request_checksum(context, checksum_properties)

 def apply_request_checksum(context, headers, checksum_properties)
   header_name = checksum_properties[:name]
-  body = context.http_request.body_contents
   headers[header_name] = calculate_checksum(
     checksum_properties[:algorithm],
-    body
+    context.http_request.body
   )
 end

 def calculate_checksum(algorithm, body)
   digest = ChecksumAlgorithm.digest_for_algorithm(algorithm)
   if body.respond_to?(:read)
+    body.rewind
     update_in_chunks(digest, body)
+    body.rewind
   else
     digest.update(body)
   end
```
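The added `body.rewind` calls matter because digesting an IO consumes it. A minimal standalone illustration (not from this PR; `StringIO` and `Digest::SHA256` stand in for the request body and the checksum digest):

```ruby
require 'digest'
require 'stringio'

body = StringIO.new('hello world')
digest = Digest::SHA256.new

# Computing the checksum reads the IO to EOF...
digest.update(body.read)
body.read # => "" -- a transport reading the body now would send nothing

# ...so the cursor must be rewound after digesting before the body is
# reused. Rewinding first also guards against an already-read body.
body.rewind
body.read # => "hello world"
```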
```diff
@@ -388,13 +391,14 @@ def apply_request_trailer_checksum(context, headers, checksum_properties)
   unless context.http_request.body.respond_to?(:size)
     raise Aws::Errors::ChecksumError, 'Could not determine length of the body'
   end
-  headers['X-Amz-Decoded-Content-Length'] = context.http_request.body.size

-  context.http_request.body = AwsChunkedTrailerDigestIO.new(
-    context.http_request.body,
-    checksum_properties[:algorithm],
-    location_name
-  )
+  headers['X-Amz-Decoded-Content-Length'] = context.http_request.body.size
+  context.http_request.body =
+    AwsChunkedTrailerDigestIO.new(
+      io: context.http_request.body,
+      algorithm: checksum_properties[:algorithm],
+      location_name: location_name
+    )
 end

 def should_verify_response_checksum?(context)
```
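For orientation, the wrapped body uses `aws-chunked` framing: each chunk is prefixed with its hex length, and a zero-length chunk carrying the checksum trailer terminates the stream, while `X-Amz-Decoded-Content-Length` advertises the original, pre-chunking size. A rough sketch of the framing for a tiny payload (illustrative only; the trailer name is assumed to be the checksum header, e.g. `x-amz-checksum-sha256`):

```ruby
require 'digest'

payload = 'abc' # X-Amz-Decoded-Content-Length would be 3

framed = +''
framed << "#{payload.bytesize.to_s(16)}\r\n#{payload}\r\n" # "3\r\nabc\r\n"
framed << "0\r\n"                                          # zero-length final chunk
framed << "x-amz-checksum-sha256:#{Digest::SHA256.base64digest(payload)}\r\n\r\n"

puts framed.inspect
```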
```diff
@@ -417,10 +421,7 @@ def add_verify_response_headers_handler(context, checksum_context)
   context[:http_checksum][:validation_list] = validation_list

   context.http_response.on_headers do |_status, headers|
-    header_name, algorithm = response_header_to_verify(
-      headers,
-      validation_list
-    )
+    header_name, algorithm = response_header_to_verify(headers, validation_list)
     next unless header_name

     expected = headers[header_name]
```
```diff
@@ -466,52 +467,87 @@ def response_header_to_verify(headers, validation_list)
 # Wrapper for request body that implements application-layer
 # chunking with Digest computed on chunks + added as a trailer
 class AwsChunkedTrailerDigestIO
-  CHUNK_SIZE = 16_384
-
-  def initialize(io, algorithm, location_name)
-    @io = io
-    @location_name = location_name
-    @algorithm = algorithm
-    @digest = ChecksumAlgorithm.digest_for_algorithm(algorithm)
-    @trailer_io = nil
+  def initialize(options = {})
+    @io = options.delete(:io)
+    @location_name = options.delete(:location_name)
+    @algorithm = options.delete(:algorithm)
+    @digest = ChecksumAlgorithm.digest_for_algorithm(@algorithm)
+    @chunk_size = Thread.current[:net_http_override_body_stream_chunk] || MIN_CHUNK_SIZE
+    @overhead_bytes = calculate_overhead(@chunk_size)
+    @base_chunk_size = @chunk_size - @overhead_bytes
+    @buffer = +''
+    @current_chunk = +''
+    @eof = false
   end

   # the size of the application layer aws-chunked + trailer body
   def size
-    # compute the number of chunks
-    # a full chunk has 4 + 4 bytes overhead, a partial chunk is len.to_s(16).size + 4
     orig_body_size = @io.size
-    n_full_chunks = orig_body_size / CHUNK_SIZE
-    partial_bytes = orig_body_size % CHUNK_SIZE
-    chunked_body_size = n_full_chunks * (CHUNK_SIZE + 8)
-    chunked_body_size += partial_bytes.to_s(16).size + partial_bytes + 4 unless partial_bytes.zero?
+    n_full_chunks = orig_body_size / @base_chunk_size
+    partial_bytes = orig_body_size % @base_chunk_size
+
+    chunked_body_size = n_full_chunks * (@base_chunk_size + @base_chunk_size.to_s(16).size + 4)
+    chunked_body_size += partial_bytes.to_s(16).size + partial_bytes + 4 unless partial_bytes.zero?
     trailer_size = ChecksumAlgorithm.trailer_length(@algorithm, @location_name)
     chunked_body_size + trailer_size
   end

   def rewind
     @io.rewind
+    @buffer = +''
+    @current_chunk = +''
+    @eof = false
   end

-  def read(length, buf = nil)
-    # account for possible leftover bytes at the end, if we have trailer bytes, send them
-    if @trailer_io
-      return @trailer_io.read(length, buf)
-    end
-
-    chunk = @io.read(length)
-    if chunk
-      @digest.update(chunk)
-      application_chunked = "#{chunk.bytesize.to_s(16)}\r\n#{chunk}\r\n"
-      return StringIO.new(application_chunked).read(application_chunked.size, buf)
-    else
-      trailers = {}
-      trailers[@location_name] = @digest.base64digest
-      trailers = trailers.map { |k,v| "#{k}:#{v}" }.join("\r\n")
-      @trailer_io = StringIO.new("0\r\n#{trailers}\r\n\r\n")
-      chunk = @trailer_io.read(length, buf)
-    end
-    chunk
+  def read(length = nil, buf = nil)
+    return @io.read unless length
+
+    return if @eof && @buffer.empty? && @current_chunk.empty?
+
+    buf&.clear
+    output_buffer = buf || +''
+
+    while output_buffer.bytesize < length # fill until output buffer is ready
+      unless @buffer.empty?
+        take = [length - output_buffer.bytesize, @buffer.bytesize].min
+        slice_data = @buffer.slice!(0, take)
+        output_buffer << slice_data
+        next
+      end
+
+      unless @current_chunk.empty?
+        take = [length - output_buffer.bytesize, @current_chunk.bytesize].min
+        slice_data = @current_chunk.slice!(0, take)
+        output_buffer << slice_data
+        next
+      end
+
+      if !@eof
+        fill_chunk
+      else
+        break
+      end
+    end
+
+    output_buffer
+  end
+
+  private
+
+  def calculate_overhead(chunk_size)
+    chunk_size.to_s(16).size + 4 # hex_length + "\r\n\r\n"
+  end
+
+  def fill_chunk
+    chunk = @io.read(@base_chunk_size)
+
+    if chunk && !chunk.empty?
+      @digest.update(chunk)
+      @current_chunk << "#{chunk.bytesize.to_s(16)}\r\n#{chunk}\r\n"
+    else
+      trailer_str = { @location_name => @digest.base64digest }.map { |k, v| "#{k}:#{v}" }.join("\r\n")
+      @current_chunk << "0\r\n#{trailer_str}\r\n\r\n"
+      @eof = true
+    end
   end
 end
```

**Comment on** `return @io.read unless length` (Contributor): I'm a little confused about this - this means we won't use AWS chunked encoding when we're not given a length?

**Comment on** `chunk = @io.read(@base_chunk_size)` (Contributor): What happens here if fewer (but non-zero) bytes than `@base_chunk_size` are returned? (I can't 100% remember Ruby, but I think IO objects are allowed to return less than the requested length when the rest of the bytes aren't yet available.)
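If partial reads turn out to be possible here, one common guard (not part of this PR, shown only as a hypothetical helper assuming the same `@io` and `@base_chunk_size` instance variables) is to loop until the chunk budget is filled or the source hits EOF, so short reads never produce undersized and differently framed chunks:

```ruby
# Hypothetical helper: accumulate reads until we have @base_chunk_size
# bytes or the underlying IO is exhausted.
def read_full_chunk
  chunk = +''
  while chunk.bytesize < @base_chunk_size
    part = @io.read(@base_chunk_size - chunk.bytesize)
    break if part.nil? || part.empty? # EOF
    chunk << part
  end
  chunk.empty? ? nil : chunk
end
```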
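Separately, the new `#size` arithmetic can be sanity-checked standalone. A worked example using the formula from the diff, with an assumed 16 KiB chunk budget and a 100,000-byte body (values chosen purely for illustration):

```ruby
chunk_size = 16_384                       # MIN_CHUNK_SIZE
overhead = chunk_size.to_s(16).size + 4   # "4000" plus two "\r\n" = 8 bytes
base_chunk_size = chunk_size - overhead   # 16_376 payload bytes per chunk

orig_body_size = 100_000
n_full_chunks = orig_body_size / base_chunk_size # => 6
partial_bytes = orig_body_size % base_chunk_size # => 1_744

size = n_full_chunks * (base_chunk_size + base_chunk_size.to_s(16).size + 4)
size += partial_bytes.to_s(16).size + partial_bytes + 4 unless partial_bytes.zero?
size # => 100_055, before adding the checksum trailer's length
```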