@@ -29,6 +29,7 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/clusterstate/utils"
 	"k8s.io/autoscaler/cluster-autoscaler/metrics"
 	"k8s.io/autoscaler/cluster-autoscaler/processors/nodegroupconfig"
+	"k8s.io/autoscaler/cluster-autoscaler/processors/nodegroups/asyncnodegroups"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/backoff"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
 	kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
@@ -141,6 +142,7 @@ type ClusterStateRegistry struct {
 	cloudProviderNodeInstancesCache *utils.CloudProviderNodeInstancesCache
 	interrupt                       chan struct{}
 	nodeGroupConfigProcessor        nodegroupconfig.NodeGroupConfigProcessor
+	asyncNodeGroupStateChecker      asyncnodegroups.AsyncNodeGroupStateChecker

 	// scaleUpFailures contains information about scale-up failures for each node group. It should be
 	// cleared periodically to avoid unnecessary accumulation.
@@ -155,7 +157,7 @@ type NodeGroupScalingSafety struct {
 }

 // NewClusterStateRegistry creates new ClusterStateRegistry.
-func NewClusterStateRegistry(cloudProvider cloudprovider.CloudProvider, config ClusterStateRegistryConfig, logRecorder *utils.LogEventRecorder, backoff backoff.Backoff, nodeGroupConfigProcessor nodegroupconfig.NodeGroupConfigProcessor) *ClusterStateRegistry {
+func NewClusterStateRegistry(cloudProvider cloudprovider.CloudProvider, config ClusterStateRegistryConfig, logRecorder *utils.LogEventRecorder, backoff backoff.Backoff, nodeGroupConfigProcessor nodegroupconfig.NodeGroupConfigProcessor, asyncNodeGroupStateChecker asyncnodegroups.AsyncNodeGroupStateChecker) *ClusterStateRegistry {
 	return &ClusterStateRegistry{
 		scaleUpRequests:   make(map[string]*ScaleUpRequest),
 		scaleDownRequests: make([]*ScaleDownRequest, 0),
@@ -175,6 +177,7 @@ func NewClusterStateRegistry(cloudProvider cloudprovider.CloudProvider, config C
 		interrupt:                  make(chan struct{}),
 		scaleUpFailures:            make(map[string][]ScaleUpFailure),
 		nodeGroupConfigProcessor:   nodeGroupConfigProcessor,
+		asyncNodeGroupStateChecker: asyncNodeGroupStateChecker,
 	}
 }

@@ -684,7 +687,7 @@ func (csr *ClusterStateRegistry) updateIncorrectNodeGroupSizes(currentTime time.
 			klog.Warningf("Acceptable range for node group %s not found", nodeGroup.Id())
 			continue
 		}
-		if nodeGroup.IsUpcoming() {
+		if csr.asyncNodeGroupStateChecker.IsUpcoming(nodeGroup) {
 			// Nodes for upcoming node groups reside in-memory and wait for node group to be fully
 			// created. There is no need to mark their sizes incorrect.
 			continue
@@ -986,7 +989,7 @@ func (csr *ClusterStateRegistry) GetUpcomingNodes() (upcomingCounts map[string]i
 	registeredNodeNames = map[string][]string{}
 	for _, nodeGroup := range csr.cloudProvider.NodeGroups() {
 		id := nodeGroup.Id()
-		if nodeGroup.IsUpcoming() {
+		if csr.asyncNodeGroupStateChecker.IsUpcoming(nodeGroup) {
 			size, err := nodeGroup.TargetSize()
 			if size >= 0 || err != nil {
 				upcomingCounts[id] = size
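
For reviewers, a minimal sketch of the new dependency as it can be inferred from this diff alone: the package path and interface name come from the added import and struct field, and the IsUpcoming signature is inferred from the two rewritten call sites. The no-op implementation below is a hypothetical illustration, not necessarily what the PR itself names or ships.

package asyncnodegroups

import "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"

// AsyncNodeGroupStateChecker reports whether a node group is still being
// created asynchronously ("upcoming"), i.e. its nodes exist only in memory
// while the cloud provider finishes creating the group.
type AsyncNodeGroupStateChecker interface {
	IsUpcoming(nodeGroup cloudprovider.NodeGroup) bool
}

// noOpAsyncNodeGroupStateChecker is an assumed default for providers without
// asynchronous node group creation: no group is ever reported as upcoming,
// so the registry treats every group as fully created.
type noOpAsyncNodeGroupStateChecker struct{}

func (*noOpAsyncNodeGroupStateChecker) IsUpcoming(cloudprovider.NodeGroup) bool {
	return false
}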
0 commit comments
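Note that this changes the signature of NewClusterStateRegistry, so every caller, including tests, has to supply a checker. A sketch of an updated call site, assuming the hypothetical no-op type above and that provider, config, logRecorder, backoff, and processor are already in scope:

// Only the final argument is new in this diff; the rest is unchanged.
csr := clusterstate.NewClusterStateRegistry(provider, config, logRecorder, backoff, processor, &noOpAsyncNodeGroupStateChecker{})

Moving the IsUpcoming check off the node group and behind an injected checker puts the "is this group still being created?" decision in one replaceable place, which also makes it straightforward to stub in tests.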