xds/resolver: Add support for cluster specifier plugins #4987

Merged: 7 commits, Dec 6, 2021
Changes from 2 commits
70 changes: 51 additions & 19 deletions xds/internal/resolver/serviceconfig.go
@@ -38,6 +38,7 @@ import (
"google.golang.org/grpc/status"
"google.golang.org/grpc/xds/internal/balancer/clustermanager"
"google.golang.org/grpc/xds/internal/balancer/ringhash"
"google.golang.org/grpc/xds/internal/clusterspecifier"
"google.golang.org/grpc/xds/internal/httpfilter"
"google.golang.org/grpc/xds/internal/httpfilter/router"
"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
@@ -83,12 +84,23 @@ func (r *xdsResolver) pruneActiveClusters() {
// serviceConfigJSON produces a service config in JSON format representing all
// the clusters referenced in activeClusters. This includes clusters with zero
// references, so they must be pruned first.
func serviceConfigJSON(activeClusters map[string]*clusterInfo) ([]byte, error) {
func serviceConfigJSON(activeClusters map[string]*clusterInfo, clusterSpecifierPlugins map[string]clusterspecifier.BalancerConfig) ([]byte, error) {
// Generate children (all entries in activeClusters).
children := make(map[string]xdsChildConfig)
for cluster := range activeClusters {
children[cluster] = xdsChildConfig{
ChildPolicy: newBalancerConfig(cdsName, cdsBalancerConfig{Cluster: cluster}),
// Look up the cluster in the cluster specifier plugins map, whose keys have
// no prefix attached to the plugin names, to determine the LB config if the
// cluster is a CSP.
cspCfg, ok := clusterSpecifierPlugins[strings.TrimPrefix(cluster, "cluster_specifier_plugin:")]
if ok {
Member

Are we handling this case correctly (I think we're not):

  1. First xDS update contains a cluster specifier plugin for RLS
  2. RPC starts that selects that CSP
  3. New xDS update removes that CSP
  4. When generating this JSON here, we need to maintain the CSP config data and keep the "cluster" it references around until (2) is done

Contributor Author

Ah, I found a solution that solves this problem and the comment about name collisions on the branch in serviceConfigJSON. I simply added the csp balancer configuration (if the cluster is a csp) to the cluster info. This reuses all the plumbing around active clusters, and also solves the correctness issue on the branch of the cluster specifier, as you can branch on clusterInfo.cspCfg != nil.

children[cluster] = xdsChildConfig{
ChildPolicy: balancerConfig(cspCfg),
}
} else {
// The cluster name now has a "cluster:" prefix, so the CDS policy will have
// to trim it off when it queries CDS.
children[cluster] = xdsChildConfig{
ChildPolicy: newBalancerConfig(cdsName, cdsBalancerConfig{Cluster: cluster}),
}
}
}
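
The follow-up approach described in the thread above (storing the CSP's balancer configuration on the ref-counted cluster entry) might look roughly like the sketch below. The field and helper names are assumptions taken from the comments; only clusterInfo's refCount and the balancer-config helpers appear in this hunk.

    // Sketch (assumed names): the CSP's LB config lives on the ref-counted
    // clusterInfo, so it survives until every in-flight RPC that selected the
    // CSP has committed, addressing the removal scenario raised above.
    type clusterInfo struct {
        refCount int32
        // cspCfg is non-nil only for entries created for a cluster specifier
        // plugin; it holds the LB config emitted by that plugin.
        cspCfg clusterspecifier.BalancerConfig
    }

    // childConfigFor is a hypothetical helper showing how serviceConfigJSON
    // could branch on cspCfg instead of re-deriving the plugin name. The
    // prefixed cluster name is passed through unchanged, as in the hunk above.
    func childConfigFor(cluster string, ci *clusterInfo) xdsChildConfig {
        if ci.cspCfg != nil {
            return xdsChildConfig{ChildPolicy: balancerConfig(ci.cspCfg)}
        }
        return xdsChildConfig{ChildPolicy: newBalancerConfig(cdsName, cdsBalancerConfig{Cluster: cluster})}
    }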

@@ -134,11 +146,15 @@ func (r route) String() string {
}

type configSelector struct {
r *xdsResolver
virtualHost virtualHost
routes []route
clusters map[string]*clusterInfo
httpFilterConfig []xdsresource.HTTPFilter
r *xdsResolver
virtualHost virtualHost
routes []route
clusters map[string]*clusterInfo
httpFilterConfig []xdsresource.HTTPFilter
clusterSpecifierPlugins map[string]clusterspecifier.BalancerConfig
// clusterSpecifierPlugins is used by serviceConfigJSON to look up the LB
// configuration for clusters backed by a cluster specifier plugin; ref
// counts are still tracked in activeClusters.
}

var errNoMatchedRouteFound = status.Errorf(codes.Unavailable, "no matched route was found")
@@ -158,10 +174,13 @@ func (cs *configSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RP
if rt == nil || rt.clusters == nil {
return nil, errNoMatchedRouteFound
}

cluster, ok := rt.clusters.Next().(*routeCluster)
if !ok {
return nil, status.Errorf(codes.Internal, "error retrieving cluster for match: %v (%T)", cluster, cluster)
}


// Add a ref to the selected cluster, as this RPC needs this cluster until
// it is committed.
ref := &cs.clusters[cluster.name].refCount
@@ -353,21 +372,34 @@ func (r *xdsResolver) newConfigSelector(su serviceUpdate) (*configSelector, erro

for i, rt := range su.virtualHost.Routes {
clusters := newWRR()
for cluster, wc := range rt.WeightedClusters {
if rt.ClusterSpecifierPlugin != "" {
clusters.Add(&routeCluster{
name: cluster,
httpFilterConfigOverride: wc.HTTPFilterConfigOverride,
}, int64(wc.Weight))

// Initialize entries in cs.clusters map, creating entries in
// r.activeClusters as necessary. Set to zero as they will be
// incremented by incRefs.
ci := r.activeClusters[cluster]
name: "cluster:" + rt.ClusterSpecifierPlugin,
Member

Shouldn't this be "cluster_specifier_plugin:" + rt.ClusterSpecifierPlugin?

Please use a local to hold that instead of repeating it 4 times. Also, global consts for "cluster:" and "cluster_specifier_plugin:" would be a good idea to avoid any chance of a typo in one usage.

Contributor Author

Yeah, good catch. Added global consts and also a local var for both branches.

Contributor Author

Also, the comment isn't showing up on this PR, but I refactored the cluster initialization into another function. It made it much cleaner.

}, 1)

ci := r.activeClusters["cluster_specifier_plugin:" + rt.ClusterSpecifierPlugin]
if ci == nil {
ci = &clusterInfo{refCount: 0}
r.activeClusters[cluster] = ci
r.activeClusters["cluster_specifier_plugin:" + rt.ClusterSpecifierPlugin] = ci
}
cs.clusters["cluster_specifier_plugin:" + rt.ClusterSpecifierPlugin] = ci
} else {
for cluster, wc := range rt.WeightedClusters {
clusters.Add(&routeCluster{
name: "cluster:" + cluster,
httpFilterConfigOverride: wc.HTTPFilterConfigOverride,
}, int64(wc.Weight))

// Initialize entries in cs.clusters map, creating entries in
// r.activeClusters as necessary. Set to zero as they will be
// incremented by incRefs.
ci := r.activeClusters["cluster:" + cluster]
if ci == nil {
ci = &clusterInfo{refCount: 0}
r.activeClusters["cluster:" + cluster] = ci
}
cs.clusters["cluster:" + cluster] = ci
}
cs.clusters[cluster] = ci
}
cs.routes[i].clusters = clusters

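For reference, the global constants and the extracted cluster-initialization helper mentioned in the review thread above could look something like the following sketch. The identifier names are guesses, not the names used in the later commits; the body mirrors the duplicated blocks in the hunk above.

    // Assumed names for the prefix constants the reviewer asked for.
    const (
        clusterPrefix                = "cluster:"
        clusterSpecifierPluginPrefix = "cluster_specifier_plugin:"
    )

    // initializeCluster ensures a ref-counted entry exists for prefixedName in
    // both r.activeClusters and cs.clusters, so the same logic is not repeated
    // on the CSP and weighted-cluster branches.
    func (r *xdsResolver) initializeCluster(prefixedName string, cs *configSelector) {
        ci := r.activeClusters[prefixedName]
        if ci == nil {
            ci = &clusterInfo{refCount: 0}
            r.activeClusters[prefixedName] = ci
        }
        cs.clusters[prefixedName] = ci
    }
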
11 changes: 8 additions & 3 deletions xds/internal/resolver/watch_service.go
@@ -25,6 +25,7 @@ import (

"google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/pretty"
"google.golang.org/grpc/xds/internal/clusterspecifier"
"google.golang.org/grpc/xds/internal/xdsclient"
"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
)
@@ -35,6 +36,9 @@ import (
type serviceUpdate struct {
// virtualHost contains routes and other configuration to route RPCs.
virtualHost *xdsresource.VirtualHost
// clusterSpecifierPlugins contain the configurations for any cluster
Member

nit: containS (the ...Plugins map is a singular)

Contributor Author

Ah, I see, switched.

// specifier plugins emitted by the xdsclient.
clusterSpecifierPlugins map[string]clusterspecifier.BalancerConfig
// ldsConfig contains configuration that applies to all routes.
ldsConfig ldsConfig
}
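
As an illustration only, an RDS update that configures an RLS cluster specifier plugin might populate this map along the lines below. The map key, the "rls_experimental" policy name, and the assumption that clusterspecifier.BalancerConfig is a JSON-like list of policy-to-config maps are not taken from this diff.

    // Hypothetical contents of serviceUpdate.clusterSpecifierPlugins after an
    // RDS update that carries one RLS plugin; the concrete config shape comes
    // from the registered plugin, not from this code.
    var exampleCSPs = map[string]clusterspecifier.BalancerConfig{
        "rls-csp": {
            {"rls_experimental": map[string]interface{}{
                // routeLookupConfig, childPolicy, etc. would appear here.
            }},
        },
    }
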
@@ -120,7 +124,7 @@ func (w *serviceUpdateWatcher) handleLDSResp(update xdsresource.ListenerUpdate,
}

// Handle the inline RDS update as if it's from an RDS watch.
w.updateVirtualHostsFromRDS(*update.InlineRouteConfig)
w.applyRouteConfigUpdate(*update.InlineRouteConfig)
return
}

@@ -151,7 +155,7 @@ func (w *serviceUpdateWatcher) handleLDSResp(update xdsresource.ListenerUpdate,
w.rdsCancel = w.c.WatchRouteConfig(update.RouteConfigName, w.handleRDSResp)
}

func (w *serviceUpdateWatcher) updateVirtualHostsFromRDS(update xdsresource.RouteConfigUpdate) {
func (w *serviceUpdateWatcher) applyRouteConfigUpdate(update xdsresource.RouteConfigUpdate) {
matchVh := xdsresource.FindBestMatchingVirtualHost(w.serviceName, update.VirtualHosts)
if matchVh == nil {
// No matching virtual host found.
@@ -160,6 +164,7 @@ func (w *serviceUpdateWatcher) updateVirtualHostsFromRDS(update xdsresource.Rout
}

w.lastUpdate.virtualHost = matchVh
w.lastUpdate.clusterSpecifierPlugins = update.ClusterSpecifierPlugins
dfawley marked this conversation as resolved.
w.serviceCb(w.lastUpdate, nil)
}

Expand All @@ -179,7 +184,7 @@ func (w *serviceUpdateWatcher) handleRDSResp(update xdsresource.RouteConfigUpdat
w.serviceCb(serviceUpdate{}, err)
return
}
w.updateVirtualHostsFromRDS(update)
w.applyRouteConfigUpdate(update)
}

func (w *serviceUpdateWatcher) close() {
2 changes: 1 addition & 1 deletion xds/internal/resolver/xds_resolver.go
@@ -206,7 +206,7 @@ func (r *xdsResolver) sendNewServiceConfig(cs *configSelector) bool {
}

// Produce the service config.
sc, err := serviceConfigJSON(r.activeClusters)
sc, err := serviceConfigJSON(r.activeClusters, cs.clusterSpecifierPlugins)
if err != nil {
// JSON marshal error; should never happen.
r.logger.Errorf("%v", err)
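
To make the shape of the generated config concrete: with one route pointing at a regular cluster and one route selecting an RLS cluster specifier plugin, the JSON produced here might look roughly like the hand-written example below. The cluster name, the "rls_experimental" policy name, and the prefixed keys are assumptions for illustration, not output captured from this change.

    // Illustrative only; not generated by the code in this PR.
    const exampleServiceConfig = `{
      "loadBalancingConfig": [{
        "xds_cluster_manager_experimental": {
          "children": {
            "cluster:backend_service": {
              "childPolicy": [{"cds_experimental": {"cluster": "cluster:backend_service"}}]
            },
            "cluster_specifier_plugin:rls-csp": {
              "childPolicy": [{"rls_experimental": {}}]
            }
          }
        }
      }]
    }`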