Skip to content
Open
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
19 changes: 7 additions & 12 deletions source/common/router/router.cc
Original file line number Diff line number Diff line change
Expand Up @@ -815,6 +815,8 @@ bool Filter::continueDecodeHeaders(Upstream::ThreadLocalCluster* cluster,
createRetryState(*getEffectiveRetryPolicy(), headers, *cluster_, config_->factory_context_,
callbacks_->dispatcher(), route_entry_->priority());

absl::InlinedVector<std::reference_wrapper<const ShadowPolicy>, 4> active_shadow_policies;

// Determine which shadow policies to use. It's possible that we don't do any shadowing due to
// runtime keys. Also the method CONNECT doesn't support shadowing.
auto method = headers.getMethodValue();
Expand All @@ -828,8 +830,7 @@ bool Filter::continueDecodeHeaders(Upstream::ThreadLocalCluster* cluster,
for (const auto& shadow_policy : policies_to_evaluate) {
const auto& policy_ref = *shadow_policy;
if (FilterUtility::shouldShadow(policy_ref, config_->runtime_, callbacks_->streamId())) {
active_shadow_policies_.push_back(std::cref(policy_ref));
shadow_headers_ = Http::createHeaderMap<Http::RequestHeaderMapImpl>(*downstream_headers_);
active_shadow_policies.push_back(std::cref(policy_ref));
Comment on lines -831 to +834
Copy link
Copy Markdown
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It's unnecessary to copy the downstream headers repeatedly. Moreover, the shadow_headers_ member is never actually used afterwards.

}
}
}
Expand All @@ -852,14 +853,14 @@ bool Filter::continueDecodeHeaders(Upstream::ThreadLocalCluster* cluster,
LinkedList::moveIntoList(std::move(upstream_request), upstream_requests_);
upstream_requests_.front()->acceptHeadersFromRouter(end_stream);
// Start the shadow streams.
for (const auto& shadow_policy_wrapper : active_shadow_policies_) {
for (const auto& shadow_policy_wrapper : active_shadow_policies) {
const auto& shadow_policy = shadow_policy_wrapper.get();
const absl::optional<absl::string_view> shadow_cluster_name =
getShadowCluster(shadow_policy, *downstream_headers_);
if (!shadow_cluster_name.has_value()) {
continue;
}
auto shadow_headers = Http::createHeaderMap<Http::RequestHeaderMapImpl>(*shadow_headers_);
auto shadow_headers = Http::createHeaderMap<Http::RequestHeaderMapImpl>(*downstream_headers_);
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This looks like a behavior change: UpstreamRequest construction injects trace context into *parent_.downstreamHeaders(), and this copy now happens after acceptHeadersFromRouter(), so mirrored requests can inherit upstream mutations, whereas previously they did not. Is that intended?

Copy link
Copy Markdown
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Good point — I forgot about the trace-context injection. I will update this.

applyShadowPolicyHeaders(shadow_policy, *shadow_headers);
const auto options =
Http::AsyncClient::RequestOptions()
Expand All @@ -883,9 +884,7 @@ bool Filter::continueDecodeHeaders(Upstream::ThreadLocalCluster* cluster,
if (end_stream) {
// This is a header-only request, and can be dispatched immediately to the shadow
// without waiting.
Http::RequestMessagePtr request(new Http::RequestMessageImpl(
Http::createHeaderMap<Http::RequestHeaderMapImpl>(*shadow_headers_)));
applyShadowPolicyHeaders(shadow_policy, request->headers());
Http::RequestMessagePtr request(new Http::RequestMessageImpl(std::move(shadow_headers)));
Comment on lines -886 to +898
Copy link
Copy Markdown
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This reuses the previously copied shadow_headers rather than copying another header map here.

config_->shadowWriter().shadow(std::string(shadow_cluster_name.value()), std::move(request),
options);
} else {
Expand Down Expand Up @@ -1091,10 +1090,6 @@ Http::FilterDataStatus Filter::decodeData(Buffer::Instance& data, bool end_strea
Http::FilterTrailersStatus Filter::decodeTrailers(Http::RequestTrailerMap& trailers) {
ENVOY_STREAM_LOG(debug, "router decoding trailers:\n{}", *callbacks_, trailers);

if (shadow_headers_) {
shadow_trailers_ = Http::createHeaderMap<Http::RequestTrailerMapImpl>(trailers);
}
Comment on lines -1094 to -1096
Copy link
Copy Markdown
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is unnecessary because we always copy a new trailers map for every shadow stream anyway.


// upstream_requests_.size() cannot be > 1 because that only happens when a per
// try timeout occurs with hedge_on_per_try_timeout enabled but the per
// try timeout timer is not started until onRequestComplete(). It could be zero
Expand All @@ -1109,7 +1104,7 @@ Http::FilterTrailersStatus Filter::decodeTrailers(Http::RequestTrailerMap& trail
shadow_stream->removeDestructorCallback();
shadow_stream->removeWatermarkCallbacks();
shadow_stream->captureAndSendTrailers(
Http::createHeaderMap<Http::RequestTrailerMapImpl>(*shadow_trailers_));
Http::createHeaderMap<Http::RequestTrailerMapImpl>(trailers));
}
shadow_streams_.clear();

Expand Down
3 changes: 0 additions & 3 deletions source/common/router/router.h
Original file line number Diff line number Diff line change
Expand Up @@ -659,9 +659,6 @@ class Filter : Logger::Loggable<Logger::Id::router>,
MetadataMatchCriteriaConstPtr metadata_match_;
std::function<void(Http::ResponseHeaderMap&)> modify_headers_;
std::function<void(Http::ResponseHeaderMap&)> modify_headers_from_upstream_lb_;
std::vector<std::reference_wrapper<const ShadowPolicy>> active_shadow_policies_;
std::unique_ptr<Http::RequestHeaderMap> shadow_headers_;
std::unique_ptr<Http::RequestTrailerMap> shadow_trailers_;
// The stream lifetime configured by request header.
absl::optional<std::chrono::milliseconds> dynamic_max_stream_duration_;
// list of cookies to add to upstream headers
Expand Down
Loading