New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
xds/resolver: Add support for cluster specifier plugins #4987
Changes from 1 commit
44160e9
8a6bf55
a616529
6393ab4
af713af
d219ac6
5b4f327
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -38,6 +38,7 @@ import ( | |
"google.golang.org/grpc/status" | ||
"google.golang.org/grpc/xds/internal/balancer/clustermanager" | ||
"google.golang.org/grpc/xds/internal/balancer/ringhash" | ||
"google.golang.org/grpc/xds/internal/clusterspecifier" | ||
"google.golang.org/grpc/xds/internal/httpfilter" | ||
"google.golang.org/grpc/xds/internal/httpfilter/router" | ||
"google.golang.org/grpc/xds/internal/xdsclient/xdsresource" | ||
|
@@ -83,12 +84,23 @@ func (r *xdsResolver) pruneActiveClusters() { | |
// serviceConfigJSON produces a service config in JSON format representing all | ||
// the clusters referenced in activeClusters. This includes clusters with zero | ||
// references, so they must be pruned first. | ||
func serviceConfigJSON(activeClusters map[string]*clusterInfo) ([]byte, error) { | ||
func serviceConfigJSON(activeClusters map[string]*clusterInfo, clusterSpecifierPlugins map[string]clusterspecifier.BalancerConfig) ([]byte, error) { | ||
// Generate children (all entries in activeClusters). | ||
children := make(map[string]xdsChildConfig) | ||
for cluster := range activeClusters { | ||
children[cluster] = xdsChildConfig{ | ||
ChildPolicy: newBalancerConfig(cdsName, cdsBalancerConfig{Cluster: cluster}), | ||
// Look into cluster specifier plugins, which hasn't had any prefix attached to it's cluster specifier plugin names, | ||
// to determine the LB Config if the cluster is a CSP. | ||
cspCfg, ok := clusterSpecifierPlugins[strings.TrimPrefix(cluster, "cluster_specifier_plugin:")] | ||
if ok { | ||
children[cluster] = xdsChildConfig{ | ||
ChildPolicy: balancerConfig(cspCfg), | ||
} | ||
} else { | ||
// Will now have "cluster:" prefixing the cluster name...CDS policy | ||
// will now have to trim this off when it queries CDS. | ||
children[cluster] = xdsChildConfig{ | ||
ChildPolicy: newBalancerConfig(cdsName, cdsBalancerConfig{Cluster: cluster}), | ||
} | ||
} | ||
} | ||
|
||
|
@@ -121,7 +133,11 @@ type routeCluster struct { | |
|
||
type route struct { | ||
m *xdsresource.CompositeMatcher // converted from route matchers | ||
|
||
// Exactly one of clusterSpecifierPlugin or clusters will be set. | ||
clusterSpecifierPlugin string | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Can we put the CSP name into clusters instead so we don't need another field? There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I see what you're saying (persist as little state as possible), but I much prefer keeping this as a separate field. Adding csp to clusters would conflate an individual csp name to a WRR type which can hold onto many cluster names and chooses one randomly. This keeps the branching logic (two fields in state, two separate ways of persistence/handling), which started from the xdsclient. There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. It's not about "persisting state", it's about reducing & simplifying code and data structures. We use the same code paths already for when WRR is used vs. when a single cluster is specified directly, so why not fold CSP into it as well? In gRPC, they are equivalent: CSPs are the same as ordinary clusters from the name resolver and cluster manager LB policy's perspectives. There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Kept the branching in newConfigSelector (for prefix + hardcoded weight 1 logic, which was in unmarshal_rds for singular cluster)...but now it's one codepath in SelectConfig/one piece of state clusters. |
||
clusters wrr.WRR // holds *routeCluster entries | ||
|
||
maxStreamDuration time.Duration | ||
// map from filter name to its config | ||
httpFilterConfigOverride map[string]httpfilter.FilterConfig | ||
|
@@ -134,11 +150,15 @@ func (r route) String() string { | |
} | ||
|
||
type configSelector struct { | ||
r *xdsResolver | ||
virtualHost virtualHost | ||
routes []route | ||
clusters map[string]*clusterInfo | ||
httpFilterConfig []xdsresource.HTTPFilter | ||
r *xdsResolver | ||
virtualHost virtualHost | ||
routes []route | ||
clusters map[string]*clusterInfo | ||
httpFilterConfig []xdsresource.HTTPFilter | ||
clusterSpecifierPlugins map[string]clusterspecifier.BalancerConfig | ||
// Will be used for: | ||
// a. serviceConfigJSON (this will build out the service config...but you still need to keep ref | ||
// counts in active clusters), this will be used to get the LB Configurations. | ||
} | ||
|
||
var errNoMatchedRouteFound = status.Errorf(codes.Unavailable, "no matched route was found") | ||
|
@@ -158,10 +178,19 @@ func (cs *configSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RP | |
if rt == nil || rt.clusters == nil { | ||
return nil, errNoMatchedRouteFound | ||
} | ||
cluster, ok := rt.clusters.Next().(*routeCluster) | ||
if !ok { | ||
return nil, status.Errorf(codes.Internal, "error retrieving cluster for match: %v (%T)", cluster, cluster) | ||
|
||
var cluster *routeCluster | ||
if rt.clusterSpecifierPlugin != "" { | ||
cluster.name = rt.clusterSpecifierPlugin | ||
// cluster.httpFilterConfigOverride = /*Do cluster specifier plugins even have http filter config overrides*/ | ||
} else { | ||
cluster, ok := rt.clusters.Next().(*routeCluster) | ||
if !ok { | ||
return nil, status.Errorf(codes.Internal, "error retrieving cluster for match: %v (%T)", cluster, cluster) | ||
} | ||
} | ||
|
||
|
||
// Add a ref to the selected cluster, as this RPC needs this cluster until | ||
// it is committed. | ||
ref := &cs.clusters[cluster.name].refCount | ||
|
@@ -352,24 +381,35 @@ func (r *xdsResolver) newConfigSelector(su serviceUpdate) (*configSelector, erro | |
} | ||
|
||
for i, rt := range su.virtualHost.Routes { | ||
clusters := newWRR() | ||
for cluster, wc := range rt.WeightedClusters { | ||
clusters.Add(&routeCluster{ | ||
name: cluster, | ||
httpFilterConfigOverride: wc.HTTPFilterConfigOverride, | ||
}, int64(wc.Weight)) | ||
|
||
// Initialize entries in cs.clusters map, creating entries in | ||
// r.activeClusters as necessary. Set to zero as they will be | ||
// incremented by incRefs. | ||
ci := r.activeClusters[cluster] | ||
if rt.ClusterSpecifierPlugin != "" { | ||
ci := r.activeClusters["cluster_specifier_plugin:" + rt.ClusterSpecifierPlugin] | ||
if ci == nil { | ||
ci = &clusterInfo{refCount: 0} | ||
r.activeClusters[cluster] = ci | ||
r.activeClusters["cluster_specifier_plugin:" + rt.ClusterSpecifierPlugin] = ci | ||
} | ||
cs.clusters["cluster_specifier_plugin:" + rt.ClusterSpecifierPlugin] = ci | ||
|
||
cs.routes[i].clusterSpecifierPlugin = "cluster_specifier_plugin:" + rt.ClusterSpecifierPlugin | ||
} else { | ||
clusters := newWRR() | ||
for cluster, wc := range rt.WeightedClusters { | ||
clusters.Add(&routeCluster{ | ||
name: "cluster:" + cluster, | ||
httpFilterConfigOverride: wc.HTTPFilterConfigOverride, | ||
}, int64(wc.Weight)) | ||
|
||
// Initialize entries in cs.clusters map, creating entries in | ||
// r.activeClusters as necessary. Set to zero as they will be | ||
// incremented by incRefs. | ||
ci := r.activeClusters["cluster:" + cluster] | ||
if ci == nil { | ||
ci = &clusterInfo{refCount: 0} | ||
r.activeClusters["cluster:" + cluster] = ci | ||
} | ||
cs.clusters["cluster:" + cluster] = ci | ||
} | ||
cs.clusters[cluster] = ci | ||
cs.routes[i].clusters = clusters | ||
} | ||
cs.routes[i].clusters = clusters | ||
|
||
var err error | ||
cs.routes[i].m, err = xdsresource.RouteToMatcher(rt) | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -25,6 +25,7 @@ import ( | |
|
||
"google.golang.org/grpc/internal/grpclog" | ||
"google.golang.org/grpc/internal/pretty" | ||
"google.golang.org/grpc/xds/internal/clusterspecifier" | ||
"google.golang.org/grpc/xds/internal/xdsclient" | ||
"google.golang.org/grpc/xds/internal/xdsclient/xdsresource" | ||
) | ||
|
@@ -35,6 +36,9 @@ import ( | |
type serviceUpdate struct { | ||
// virtualHost contains routes and other configuration to route RPCs. | ||
virtualHost *xdsresource.VirtualHost | ||
// clusterSpecifierPlugins contain the configurations for any cluster | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. nit: containS (the There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Ah, I see, switched. |
||
// specifier plugins emitted by the xdsclient. | ||
clusterSpecifierPlugins map[string]clusterspecifier.BalancerConfig | ||
// ldsConfig contains configuration that applies to all routes. | ||
ldsConfig ldsConfig | ||
} | ||
|
@@ -160,6 +164,7 @@ func (w *serviceUpdateWatcher) updateVirtualHostsFromRDS(update xdsresource.Rout | |
} | ||
|
||
w.lastUpdate.virtualHost = matchVh | ||
w.lastUpdate.clusterSpecifierPlugins = update.ClusterSpecifierPlugins | ||
dfawley marked this conversation as resolved.
Show resolved
Hide resolved
|
||
w.serviceCb(w.lastUpdate, nil) | ||
} | ||
|
||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Are we handling this case correctly (I think we're not):
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Ah, I found a solution that solves this problem and the comment about name collisions on the branch in serviceConfigJSON. I simply added the csp balancer configuration (if the cluster is a csp) to the cluster info. This reuses all the plumbing around active clusters, and also solves the correctness issue on the branch of the cluster specifier, as you can branch on clusterInfo.cspCfg != nil.