Skip to content

Commit 53cb4ff

Browse files
remyleone and Codelax
authored
fix(instance): Fix block volume detach when no additional volumes remaining (#789)
Co-authored-by: Jules Casteran <[email protected]>
1 parent 618b500 commit 53cb4ff

4 files changed

+5043
-25
lines changed

scaleway/resource_instance_server.go

+5-1
Original file line numberDiff line numberDiff line change
@@ -677,12 +677,16 @@ func resourceScalewayInstanceServerUpdate(ctx context.Context, d *schema.Resourc
677677

678678
volumes := map[string]*instance.VolumeServerTemplate{}
679679

680-
if raw, ok := d.GetOk("additional_volume_ids"); d.HasChange("additional_volume_ids") && ok {
680+
if raw, hasAdditionalVolumes := d.GetOk("additional_volume_ids"); d.HasChange("additional_volume_ids") {
681681
volumes["0"] = &instance.VolumeServerTemplate{
682682
ID: expandZonedID(d.Get("root_volume.0.volume_id")).ID,
683683
Name: newRandomName("vol"), // name is ignored by the API, any name will work here
684684
}
685685

686+
if !hasAdditionalVolumes {
687+
raw = []interface{}{} // Set an empty list if not volumes exist
688+
}
689+
686690
for i, volumeID := range raw.([]interface{}) {
687691
volumeHasChange := d.HasChange("additional_volume_ids." + strconv.Itoa(i))
688692
// local volumes can only be added when the instance is stopped

scaleway/resource_instance_server_test.go

+67
Original file line numberDiff line numberDiff line change
@@ -543,6 +543,73 @@ func TestAccScalewayInstanceServer_AdditionalVolumes(t *testing.T) {
543543
})
544544
}
545545

546+
func TestAccScalewayInstanceServer_AdditionalVolumesDetach(t *testing.T) {
547+
tt := NewTestTools(t)
548+
defer tt.Cleanup()
549+
resource.Test(t, resource.TestCase{
550+
PreCheck: func() { testAccPreCheck(t) },
551+
ProviderFactories: tt.ProviderFactories,
552+
CheckDestroy: resource.ComposeTestCheckFunc(
553+
testAccCheckScalewayInstanceVolumeDestroy(tt),
554+
testAccCheckScalewayInstanceServerDestroy(tt),
555+
),
556+
Steps: []resource.TestStep{
557+
{
558+
Config: `
559+
variable "zone" {
560+
type = string
561+
default = "fr-par-1"
562+
}
563+
564+
resource "scaleway_instance_volume" "main" {
565+
type = "b_ssd"
566+
name = "foobar"
567+
size_in_gb = 1
568+
}
569+
570+
resource "scaleway_instance_server" "main" {
571+
type = "DEV1-S"
572+
image = "ubuntu_focal"
573+
name = "foobar"
574+
575+
enable_dynamic_ip = true
576+
577+
additional_volume_ids = [scaleway_instance_volume.main.id]
578+
}
579+
`,
580+
},
581+
{
582+
Config: `
583+
variable "zone" {
584+
type = string
585+
default = "fr-par-1"
586+
}
587+
588+
resource "scaleway_instance_volume" "main" {
589+
type = "b_ssd"
590+
name = "foobar"
591+
size_in_gb = 1
592+
}
593+
594+
resource "scaleway_instance_server" "main" {
595+
type = "DEV1-S"
596+
image = "ubuntu_focal"
597+
name = "foobar"
598+
599+
enable_dynamic_ip = true
600+
601+
additional_volume_ids = []
602+
}
603+
`,
604+
Check: resource.ComposeTestCheckFunc(
605+
testAccCheckScalewayInstanceVolumeExists(tt, "scaleway_instance_volume.main"),
606+
resource.TestCheckResourceAttr("scaleway_instance_server.main", "additional_volume_ids.#", "0"),
607+
),
608+
},
609+
},
610+
})
611+
}
612+
546613
func TestAccScalewayInstanceServer_WithPlacementGroup(t *testing.T) {
547614
tt := NewTestTools(t)
548615
defer tt.Cleanup()

scaleway/resource_instance_volume.go

+20-24
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,6 @@ import (
55
"fmt"
66

77
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
8-
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
98
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
109
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
1110
"github.com/scaleway/scaleway-sdk-go/api/instance/v1"
@@ -237,35 +236,32 @@ func resourceScalewayInstanceVolumeDelete(ctx context.Context, d *schema.Resourc
237236
return diag.FromErr(err)
238237
}
239238

240-
err = resource.Retry(d.Timeout(schema.TimeoutDelete), func() *resource.RetryError {
241-
volumeResp, err := instanceAPI.GetVolume(&instance.GetVolumeRequest{
242-
Zone: zone,
243-
VolumeID: id,
244-
})
245-
if err != nil {
246-
if is404Error(err) {
247-
return nil
248-
}
249-
return resource.NonRetryableError(err)
239+
volume, err := instanceAPI.WaitForVolume(&instance.WaitForVolumeRequest{
240+
Zone: zone,
241+
VolumeID: id,
242+
RetryInterval: DefaultWaitRetryInterval,
243+
Timeout: scw.TimeDurationPtr(d.Timeout(schema.TimeoutDelete)),
244+
}, scw.WithContext(ctx))
245+
if err != nil {
246+
if is404Error(err) {
247+
return nil
250248
}
249+
return diag.FromErr(err)
250+
}
251251

252-
if volumeResp.Volume.Server != nil {
253-
return resource.RetryableError(fmt.Errorf("volume is still attached to a server"))
254-
}
252+
if volume.Server != nil {
253+
return diag.FromErr(fmt.Errorf("volume is still attached to a server"))
254+
}
255255

256-
deleteRequest := &instance.DeleteVolumeRequest{
257-
Zone: zone,
258-
VolumeID: id,
259-
}
256+
deleteRequest := &instance.DeleteVolumeRequest{
257+
Zone: zone,
258+
VolumeID: id,
259+
}
260260

261-
err = instanceAPI.DeleteVolume(deleteRequest, scw.WithContext(ctx))
262-
if err != nil {
263-
return resource.NonRetryableError(err)
264-
}
265-
return nil
266-
})
261+
err = instanceAPI.DeleteVolume(deleteRequest, scw.WithContext(ctx))
267262
if err != nil {
268263
return diag.FromErr(err)
269264
}
265+
270266
return nil
271267
}

0 commit comments

Comments
 (0)