
Commit 32c2bdb

chore(k8s): add support for quick retries with cassettes (#1195)
1 parent 76cc42b commit 32c2bdb

File tree

3 files changed: +37 -13 lines changed

scaleway/helpers_k8s.go (+33 -9)

@@ -15,6 +15,7 @@ import (
 const (
 	defaultK8SClusterTimeout = 15 * time.Minute
 	defaultK8SPoolTimeout    = 15 * time.Minute
+	defaultK8SRetryInterval  = 5 * time.Second
 )
 
 func k8sAPIWithRegion(d *schema.ResourceData, m interface{}) (*k8s.API, scw.Region, error) {
@@ -75,30 +76,48 @@ func k8sGetLatestVersionFromMinor(ctx context.Context, k8sAPI *k8s.API, region s
 }
 
 func waitK8SCluster(ctx context.Context, k8sAPI *k8s.API, region scw.Region, clusterID string, timeout time.Duration) (*k8s.Cluster, error) {
-	return k8sAPI.WaitForCluster(&k8s.WaitForClusterRequest{
+	retryInterval := defaultK8SRetryInterval
+	if DefaultWaitRetryInterval != nil {
+		retryInterval = *DefaultWaitRetryInterval
+	}
+
+	cluster, err := k8sAPI.WaitForCluster(&k8s.WaitForClusterRequest{
 		ClusterID:     clusterID,
 		Region:        region,
 		Timeout:       scw.TimeDurationPtr(timeout),
-		RetryInterval: DefaultWaitRetryInterval,
+		RetryInterval: &retryInterval,
 	}, scw.WithContext(ctx))
+
+	return cluster, err
 }
 
 func waitK8SClusterPool(ctx context.Context, k8sAPI *k8s.API, region scw.Region, clusterID string, timeout time.Duration) (*k8s.Cluster, error) {
+	retryInterval := defaultK8SRetryInterval
+	if DefaultWaitRetryInterval != nil {
+		retryInterval = *DefaultWaitRetryInterval
+	}
+
 	return k8sAPI.WaitForClusterPool(&k8s.WaitForClusterRequest{
 		ClusterID:     clusterID,
 		Region:        region,
 		Timeout:       scw.TimeDurationPtr(timeout),
-		RetryInterval: DefaultWaitRetryInterval,
+		RetryInterval: &retryInterval,
 	}, scw.WithContext(ctx))
 }
 
 func waitK8SClusterDeleted(ctx context.Context, k8sAPI *k8s.API, region scw.Region, clusterID string, timeout time.Duration) error {
+	retryInterval := defaultK8SRetryInterval
+	if DefaultWaitRetryInterval != nil {
+		retryInterval = *DefaultWaitRetryInterval
+	}
+
 	cluster, err := k8sAPI.WaitForCluster(&k8s.WaitForClusterRequest{
 		ClusterID:     clusterID,
 		Region:        region,
 		Timeout:       scw.TimeDurationPtr(timeout),
-		RetryInterval: DefaultWaitRetryInterval,
+		RetryInterval: &retryInterval,
 	}, scw.WithContext(ctx))
+
 	if err != nil {
 		if is404Error(err) {
 			return nil
@@ -109,22 +128,27 @@ func waitK8SClusterDeleted(ctx context.Context, k8sAPI *k8s.API, region scw.Regi
 	return fmt.Errorf("cluster %s has state %s, wants %s", clusterID, cluster.Status, k8s.ClusterStatusDeleted)
 }
 
-func waitK8SPoolReady(ctx context.Context, k8sAPI *k8s.API, region scw.Region, poolID string, timeout time.Duration) error {
+func waitK8SPoolReady(ctx context.Context, k8sAPI *k8s.API, region scw.Region, poolID string, timeout time.Duration) (*k8s.Pool, error) {
+	retryInterval := defaultK8SRetryInterval
+	if DefaultWaitRetryInterval != nil {
+		retryInterval = *DefaultWaitRetryInterval
+	}
+
 	pool, err := k8sAPI.WaitForPool(&k8s.WaitForPoolRequest{
 		PoolID:        poolID,
 		Region:        region,
 		Timeout:       scw.TimeDurationPtr(timeout),
-		RetryInterval: DefaultWaitRetryInterval,
+		RetryInterval: &retryInterval,
 	}, scw.WithContext(ctx))
 
 	if err != nil {
-		return err
+		return nil, err
 	}
 
 	if pool.Status != k8s.PoolStatusReady {
-		return fmt.Errorf("pool %s has state %s, wants %s", poolID, pool.Status, k8s.PoolStatusReady)
+		return nil, fmt.Errorf("pool %s has state %s, wants %s", poolID, pool.Status, k8s.PoolStatusReady)
 	}
-	return nil
+	return pool, nil
 }
 
 // convert a list of nodes to a list of map
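
Each wait helper now follows the same pattern: poll every 5 seconds by default (defaultK8SRetryInterval), but let the package-level DefaultWaitRetryInterval pointer override it. That override is what gives tests quick retries when HTTP interactions are replayed from cassettes. A minimal sketch of how a cassette-backed test might use it; the test name and the millisecond value are illustrative assumptions, only DefaultWaitRetryInterval comes from the provider code:

package scaleway

import (
	"testing"
	"time"
)

// Illustrative only: when responses come from a recorded cassette there is no
// real provisioning to wait for, so polling every 5s just slows the test down.
func TestAccScalewayK8SCluster_quickRetries(t *testing.T) {
	quick := 1 * time.Millisecond
	DefaultWaitRetryInterval = &quick // picked up by every waitK8S* helper
	defer func() { DefaultWaitRetryInterval = nil }() // restore real-time polling

	// ... run the acceptance steps against the recorded interactions ...
}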

scaleway/resource_k8s_cluster.go (+2 -2)

@@ -380,13 +380,13 @@ func resourceScalewayK8SClusterCreate(ctx context.Context, d *schema.ResourceDat
 		return diag.FromErr(err)
 	}
 
+	d.SetId(newRegionalIDString(region, res.ID))
+
 	res, err = waitK8SClusterPool(ctx, k8sAPI, region, res.ID, d.Timeout(schema.TimeoutCreate))
 	if err != nil {
 		return diag.FromErr(err)
 	}
 
-	d.SetId(newRegionalIDString(region, res.ID))
-
 	return resourceScalewayK8SClusterRead(ctx, d, meta)
 }
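
The only change here is ordering: the cluster ID is written to state with d.SetId before waitK8SClusterPool runs, so if the wait fails or times out Terraform still tracks the cluster instead of leaking it. A condensed sketch of the resulting flow, with everything before the SetId call (including the create request itself) elided and assumed from context:

	d.SetId(newRegionalIDString(region, res.ID)) // record the ID before any long wait

	res, err = waitK8SClusterPool(ctx, k8sAPI, region, res.ID, d.Timeout(schema.TimeoutCreate))
	if err != nil {
		// The cluster is already in state, so a failed wait can be retried or
		// cleaned up by Terraform rather than orphaning the resource.
		return diag.FromErr(err)
	}

	return resourceScalewayK8SClusterRead(ctx, d, meta)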

scaleway/resource_k8s_pool.go (+2 -2)

@@ -279,7 +279,7 @@ func resourceScalewayK8SPoolCreate(ctx context.Context, d *schema.ResourceData,
 	d.SetId(newRegionalIDString(region, res.ID))
 
 	if d.Get("wait_for_pool_ready").(bool) { // wait for the pool to be ready if specified (including all its nodes)
-		err = waitK8SPoolReady(ctx, k8sAPI, region, res.ID, d.Timeout(schema.TimeoutCreate))
+		_, err = waitK8SPoolReady(ctx, k8sAPI, region, res.ID, d.Timeout(schema.TimeoutCreate))
 		if err != nil {
 			return diag.FromErr(err)
 		}
@@ -412,7 +412,7 @@ func resourceScalewayK8SPoolUpdate(ctx context.Context, d *schema.ResourceData,
 	}
 
 	if d.Get("wait_for_pool_ready").(bool) { // wait for the pool to be ready if specified (including all its nodes)
-		err = waitK8SPoolReady(ctx, k8sAPI, region, res.ID, d.Timeout(schema.TimeoutUpdate))
+		_, err = waitK8SPoolReady(ctx, k8sAPI, region, res.ID, d.Timeout(schema.TimeoutUpdate))
 		if err != nil {
 			return diag.FromErr(err)
 		}
