@@ -5,79 +5,93 @@ import (
55 "fmt"
66 "math/rand/v2"
77 "net"
8+ "net/netip"
9+ "slices"
810 "testing"
911 "time"
1012
1113 "github.com/go-logr/logr/testr"
1214 "github.com/prometheus/client_golang/prometheus"
1315 "github.com/stretchr/testify/assert"
1416 "github.com/stretchr/testify/require"
17+ "go.uber.org/zap"
1518 "sigs.k8s.io/controller-runtime/pkg/log"
1619
20+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
1721 "k8s.io/apimachinery/pkg/types"
1822
1923 nfake "github.com/neondatabase/autoscaling/neonvm/client/clientset/versioned/fake"
2024 helpers "github.com/neondatabase/autoscaling/pkg/agent/core/testhelpers"
2125 "github.com/neondatabase/autoscaling/pkg/neonvm/ipam"
22- "github.com/neondatabase/autoscaling/pkg/util"
26+ "github.com/neondatabase/autoscaling/pkg/util/taskgroup "
 )
 
 type managerTest struct {
 	t   *testing.T
 	ctx context.Context
 
-	manager *ipam.Manager
-	prom    *prometheus.Registry
+	neonvmClient *nfake.Clientset
+	manager      *ipam.Manager
+	prom         *prometheus.Registry
 
 	vmStates map[types.UID]*vmState
 	ipStates map[string]*ipState
 	clock    *helpers.FakeClock
 }
 
-func newManagerTest(t *testing.T) *managerTest {
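+// init (re)builds the pool client and IPAM manager on top of the test's shared
+// fake clientset; calling it again on an existing managerTest simulates a
+// manager restart against the same IPPool object.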
+func (m *managerTest) init() {
 	managerCfg := &ipam.IPAMManagerConfig{
 		CooldownPeriod: "70s",
 		HighIPCount:    100,
 		LowIPCount:     50,
 		TargetIPCount:  75,
 	}
-	require.NoError(t, managerCfg.Normalize())
+	require.NoError(m.t, managerCfg.Normalize())
 
 	rangeCfg := &ipam.RangeConfiguration{
 		Range:      "10.100.0.0/16",
 		RangeStart: net.ParseIP("10.100.0.1"),
 		RangeEnd:   nil,
 		OmitRanges: nil,
 	}
-	require.NoError(t, rangeCfg.Normalize())
+	require.NoError(m.t, rangeCfg.Normalize())
 
-	neonvmClient := nfake.NewSimpleClientset()
 	prom := prometheus.NewRegistry()
 	metrics := ipam.NewIPAMMetrics(prom)
-	poolClient, err := ipam.NewPoolClient(neonvmClient, rangeCfg, "test", "default", metrics)
-	require.NoError(t, err)
+	poolClient, err := ipam.NewPoolClient(m.neonvmClient, rangeCfg, "test", "default", metrics)
+	require.NoError(m.t, err)
 
+	manager, err := ipam.NewManager(m.ctx, m.clock.Now, managerCfg, poolClient)
+	require.NoError(m.t, err)
+
+	metrics.AddManager(manager)
+
+	m.manager = manager
+	m.prom = prom
+}
+
+func newManagerTest(t *testing.T) *managerTest {
 	logger := testr.New(t)
 	ctx := log.IntoContext(context.Background(), logger)
 
 	clock := helpers.NewFakeClock(t)
+	neonvmClient := nfake.NewSimpleClientset()
 
-	manager, err := ipam.NewManager(ctx, clock.Now, managerCfg, poolClient)
-	require.NoError(t, err)
-
-	metrics.AddManager(manager)
-
-	return &managerTest{
+	m := &managerTest{
 		t:   t,
 		ctx: ctx,
 
-		manager: manager,
-		prom:    prom,
+		neonvmClient: neonvmClient,
+		manager:      nil,
+		prom:         nil,
 
 		vmStates: make(map[types.UID]*vmState),
 		ipStates: make(map[string]*ipState),
 		clock:    clock,
 	}
+	m.init()
+
+	return m
 }
 
 func generateAcquire(vmIDfrom, vmIDto int, actionsFrom, actionsTo int) []managerTestStep {
@@ -203,13 +217,6 @@ func (m *managerTest) run(steps []managerTestStep) {
 	}
 }
 
-func vmID(name string) util.NamespacedName {
-	return util.NamespacedName{
-		Namespace: "default",
-		Name:      name,
-	}
-}
-
 type managerMetricValue struct {
 	Name  string
 	Pool  string
@@ -342,3 +349,132 @@ func TestManagerProduction(t *testing.T) {
 		{Name: "ipam_pool_ip_count", Pool: "test-10.100.0.0-16", State: "total", Count: 65535},
 	}, metrics)
 }
+
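+// TestRestart exercises manager recovery: after a round of allocations, init()
+// rebuilds the manager from the same fake clientset, so every previously managed
+// IP starts out "unknown" until SetActive reports which VMs are still running.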
+func TestRestart(t *testing.T) {
+	manager := newManagerTest(t)
+
+	manager.run(generateAcquire(0, 10, 2, 5))
+
+	metrics := manager.collectMetrics()
+	assert.ElementsMatch(t, []managerMetricValue{
+		{Name: "ipam_manager_ip_count", Pool: "test-10.100.0.0-16", State: "allocated", Count: 10},
+		{Name: "ipam_manager_ip_count", Pool: "test-10.100.0.0-16", State: "free", Count: 40},
+		{Name: "ipam_manager_ip_count", Pool: "test-10.100.0.0-16", State: "unknown", Count: 0},
+		{Name: "ipam_manager_ip_count", Pool: "test-10.100.0.0-16", State: "cooldown", Count: 0},
+
+		{Name: "ipam_pool_ip_count", Pool: "test-10.100.0.0-16", State: "managed", Count: 50},
+		{Name: "ipam_pool_ip_count", Pool: "test-10.100.0.0-16", State: "total", Count: 65535},
+	}, metrics)
+
+	pool, err := manager.neonvmClient.NeonvmV1().IPPools("default").Get(manager.ctx, "test-10.100.0.0-16", metav1.GetOptions{})
+	require.NoError(t, err)
+	assert.Equal(t, 50, len(pool.Spec.Managed))
+
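+	// Simulate a manager restart: rebuild the manager on top of the same fake
+	// clientset, so the existing IPPool contents are rediscovered as "unknown".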
+	manager.init()
+
+	metrics = manager.collectMetrics()
+	assert.ElementsMatch(t, []managerMetricValue{
+		{Name: "ipam_manager_ip_count", Pool: "test-10.100.0.0-16", State: "allocated", Count: 0},
+		{Name: "ipam_manager_ip_count", Pool: "test-10.100.0.0-16", State: "free", Count: 0},
+		{Name: "ipam_manager_ip_count", Pool: "test-10.100.0.0-16", State: "unknown", Count: 50},
+		{Name: "ipam_manager_ip_count", Pool: "test-10.100.0.0-16", State: "cooldown", Count: 0},
+
+		{Name: "ipam_pool_ip_count", Pool: "test-10.100.0.0-16", State: "total", Count: 65535},
+	}, metrics)
+
+	manager.run(generateAcquire(10, 11, 1, 2))
+
+	metrics = manager.collectMetrics()
+	assert.ElementsMatch(t, []managerMetricValue{
+		{Name: "ipam_manager_ip_count", Pool: "test-10.100.0.0-16", State: "allocated", Count: 1},
+		{Name: "ipam_manager_ip_count", Pool: "test-10.100.0.0-16", State: "free", Count: 49},
+		{Name: "ipam_manager_ip_count", Pool: "test-10.100.0.0-16", State: "unknown", Count: 50},
+		{Name: "ipam_manager_ip_count", Pool: "test-10.100.0.0-16", State: "cooldown", Count: 0},
+
+		{Name: "ipam_pool_ip_count", Pool: "test-10.100.0.0-16", State: "managed", Count: 100},
+		{Name: "ipam_pool_ip_count", Pool: "test-10.100.0.0-16", State: "total", Count: 65535},
+	}, metrics)
+
+	// Now we simulate only 1 VM still running for the new manager.
+	vm0 := types.UID("vm-0")
+	ip0 := manager.vmStates[vm0].ip
+
+	manager.vmStates = map[types.UID]*vmState{
+		vm0: {
+			ip: ip0,
+		},
+	}
+	manager.ipStates = map[string]*ipState{
+		ip0.String(): {
+			vmID:             vm0,
+			cooldownDeadline: time.Time{},
+		},
+	}
+
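+	// Report ip0 as the only address still in use; the assertions below expect
+	// the other 49 previously "unknown" IPs to be released into cooldown.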
+	manager.manager.SetActive(map[netip.Addr]types.UID{
+		netip.MustParseAddr(ip0.String()): vm0,
+	})
+
+	metrics = manager.collectMetrics()
+	assert.ElementsMatch(t, []managerMetricValue{
+		{Name: "ipam_manager_ip_count", Pool: "test-10.100.0.0-16", State: "allocated", Count: 2}, // vm0 and vm10
+		{Name: "ipam_manager_ip_count", Pool: "test-10.100.0.0-16", State: "free", Count: 49},
+		{Name: "ipam_manager_ip_count", Pool: "test-10.100.0.0-16", State: "unknown", Count: 0},
+		{Name: "ipam_manager_ip_count", Pool: "test-10.100.0.0-16", State: "cooldown", Count: 49},
+
+		{Name: "ipam_pool_ip_count", Pool: "test-10.100.0.0-16", State: "managed", Count: 100},
+		{Name: "ipam_pool_ip_count", Pool: "test-10.100.0.0-16", State: "total", Count: 65535},
+	}, metrics)
+}
+
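+// TestAsyncRebalance drives the manager below its low-water mark of free IPs,
+// then runs the manager's background Run loop and polls the metrics until the
+// free set is grown back to 50 and the pool's managed count reaches 90.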
+func TestAsyncRebalance(t *testing.T) {
+	manager := newManagerTest(t)
+
+	manager.run(generateAcquire(0, 40, 2, 5))
+	metrics := manager.collectMetrics()
+	assert.ElementsMatch(t, []managerMetricValue{
+		{Name: "ipam_manager_ip_count", Pool: "test-10.100.0.0-16", State: "allocated", Count: 40},
+		{Name: "ipam_manager_ip_count", Pool: "test-10.100.0.0-16", State: "free", Count: 10},
+		{Name: "ipam_manager_ip_count", Pool: "test-10.100.0.0-16", State: "unknown", Count: 0},
+		{Name: "ipam_manager_ip_count", Pool: "test-10.100.0.0-16", State: "cooldown", Count: 0},
+
+		{Name: "ipam_pool_ip_count", Pool: "test-10.100.0.0-16", State: "managed", Count: 50},
+		{Name: "ipam_pool_ip_count", Pool: "test-10.100.0.0-16", State: "total", Count: 65535},
+	}, metrics)
+
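+	// Start the manager's background Run loop in a task group so the rebalance
+	// happens concurrently with the metric polling below.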
+	ctx, cancel := context.WithCancel(manager.ctx)
+	tg := taskgroup.NewGroup(zap.NewNop(), taskgroup.WithParentContext(ctx))
+	tg.Go("run", func(_ *zap.Logger) error {
+		manager.manager.Run(ctx)
+		return nil
+	})
+
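+	// Poll for up to ~1s for the background loop to refill the free set to 50.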
+	for i := 0; i < 10; i++ {
+		metrics = manager.collectMetrics()
+		if slices.Contains(metrics, managerMetricValue{
+			Name:  "ipam_manager_ip_count",
+			Pool:  "test-10.100.0.0-16",
+			State: "free",
+			Count: 50,
+		}) {
+			break
+		}
+		time.Sleep(100 * time.Millisecond)
+	}
+
+	assert.ElementsMatch(t, []managerMetricValue{
+		{Name: "ipam_manager_ip_count", Pool: "test-10.100.0.0-16", State: "allocated", Count: 40},
+		{Name: "ipam_manager_ip_count", Pool: "test-10.100.0.0-16", State: "free", Count: 50},
+		{Name: "ipam_manager_ip_count", Pool: "test-10.100.0.0-16", State: "unknown", Count: 0},
+		{Name: "ipam_manager_ip_count", Pool: "test-10.100.0.0-16", State: "cooldown", Count: 0},
+
+		{Name: "ipam_pool_ip_count", Pool: "test-10.100.0.0-16", State: "managed", Count: 90},
+		{Name: "ipam_pool_ip_count", Pool: "test-10.100.0.0-16", State: "total", Count: 65535},
+	}, metrics)
+
+	cancel()
+	err := tg.Wait()
+	require.NoError(t, err)
+
+	manager.init()
+}